diff --git a/.gitignore b/.gitignore index efd2361..56d7440 100644 --- a/.gitignore +++ b/.gitignore @@ -138,4 +138,4 @@ dmypy.json .vscode/ # OSX stuff: -.DS_Store \ No newline at end of file +.DS_Store diff --git a/flows/coordinatematch/__init__.py b/flows/coordinatematch/__init__.py new file mode 100644 index 0000000..34a7466 --- /dev/null +++ b/flows/coordinatematch/__init__.py @@ -0,0 +1,2 @@ +from .coordinatematch import CoordinateMatch # noqa: F401 +from .wcs import WCS2 # noqa: F401 diff --git a/flows/coordinatematch/coordinatematch.py b/flows/coordinatematch/coordinatematch.py new file mode 100644 index 0000000..a3acc95 --- /dev/null +++ b/flows/coordinatematch/coordinatematch.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- +""" +Match two sets of coordinates + +.. codeauthor:: Simon Holmbo +""" +import time + +from itertools import count, islice, chain, product, zip_longest + +import numpy as np + +from astropy.coordinates.angle_utilities import angular_separation +from scipy.spatial import cKDTree as KDTree +from networkx import Graph, connected_components + +from .wcs import WCS2 + + +class CoordinateMatch(): + def __init__(self, + xy, + rd, + xy_order=None, + rd_order=None, + xy_nmax=None, + rd_nmax=None, + n_triangle_packages=10, + triangle_package_size=10000, + maximum_angle_distance=0.001, + distance_factor=1): + + self.xy, self.rd = np.array(xy), np.array(rd) + + self._xy = xy - np.mean(xy, axis=0) + self._rd = rd - np.mean(rd, axis=0) + self._rd[:, 0] *= np.cos(np.deg2rad(self.rd[:, 1])) + + xy_n, rd_n = min(xy_nmax, len(xy)), min(rd_nmax, len(rd)) + + self.i_xy = xy_order[:xy_n] if xy_order is not None else np.arange( + xy_n) + self.i_rd = rd_order[:rd_n] if rd_order is not None else np.arange( + rd_n) + + self.n_triangle_packages = n_triangle_packages + self.triangle_package_size = triangle_package_size + + self.maximum_angle_distance = maximum_angle_distance + self.distance_factor = distance_factor + + self.triangle_package_generator = 
self._sorted_triangle_packages() + + self.i_xy_triangles = list() + self.i_rd_triangles = list() + self.parameters = None + self.neighbours = Graph() + + self.normalizations = type( + 'Normalizations', (object, ), + dict(ra=0.0001, dec=0.0001, scale=0.002, angle=0.002)) + + self.bounds = type( + 'Bounds', (object, ), + dict(xy=self.xy.mean(axis=0), + rd=None, + radius=None, + scale=None, + angle=None)) + + def set_normalizations(self, ra=None, dec=None, scale=None, angle=None): + '''Set normalization factors in the (ra, dec, scale, angle) space. + + Defaults are: + ra = 0.0001 degrees + dec = 0.0001 degrees + scale = 0.002 log(arcsec/pixel) + angle = 0.002 radians + ''' + + if self.parameters is not None: + + raise Exception( + 'can\'t change normalization after matching is started') + + assert ra is None or 0 < ra + assert dec is None or 0 < dec + assert scale is None or 0 < scale + assert angle is None or 0 < angle + + self.normalizations.ra = ra if ra is not None else self.normalizations.ra + self.normalizations.dec = dec if dec is not None else self.normalizations.dec + self.normalizations.scale = scale if scale is not None else self.normalizations.scale + self.normalizations.angle = angle if angle is not None else self.normalizations.angle + + def set_bounds(self, + x=None, + y=None, + ra=None, + dec=None, + radius=None, + scale=None, + angle=None): + '''Set bounds for what are valid results. + + Set x, y, ra, dec and radius to specify that the x, y coordinates must be no + further than the radius [degrees] away from the ra, dec coordinates. + Set upper and lower bounds on the scale [log(arcsec/pixel)] and/or the angle + [radians] if those are known, possibly from previous observations with the + same system. 
+ ''' + + if self.parameters is not None: + + raise Exception('can\'t change bounds after matching is started') + + if [x, y, ra, dec, radius].count(None) == 0: + + assert 0 <= ra < 360 + assert -180 <= dec <= 180 + assert 0 < radius + + self.bounds.xy = x, y + self.bounds.rd = ra, dec + self.bounds.radius = radius + + elif [x, y, ra, dec, radius].count(None) < 5: + + raise Exception('x, y, ra, dec and radius must all be specified') + + assert scale is None or 0 < scale[0] < scale[1] + assert angle is None or -np.pi <= angle[0] < angle[1] <= np.pi + + self.bounds.scale = scale if scale is not None else self.bounds.scale + self.bounds.angle = angle if angle is not None else self.bounds.angle + + def _sorted_triangles(self, pool): + + for i, c in enumerate(pool): + for i, b in enumerate(pool[:i]): + for a in pool[:i]: + + yield a, b, c + + def _sorted_product_pairs(self, p, q): + + i_p = np.argsort(np.arange(len(p))) + i_q = np.argsort(np.arange(len(q))) + + for _i_p, _i_q in sorted(product(i_p, i_q), + key=lambda idxs: sum(idxs)): + + yield p[_i_p], q[_i_q] + + def _sorted_triangle_packages(self): + + i_xy_triangle_generator = self._sorted_triangles(self.i_xy) + i_rd_triangle_generator = self._sorted_triangles(self.i_rd) + + i_xy_triangle_slice_generator = (tuple( + islice(i_xy_triangle_generator, self.triangle_package_size)) for _ in count()) + i_rd_triangle_slice_generator = (list( + islice(i_rd_triangle_generator, self.triangle_package_size)) for _ in count()) + + for n in count(step=self.n_triangle_packages): + + i_xy_triangle_slice = tuple( + filter( + None, + islice(i_xy_triangle_slice_generator, + self.n_triangle_packages))) + i_rd_triangle_slice = tuple( + filter( + None, + islice(i_rd_triangle_slice_generator, + self.n_triangle_packages))) + + if not len(i_xy_triangle_slice) and not len(i_rd_triangle_slice): + return + + i_xy_triangle_generator2 = self._sorted_triangles(self.i_xy) + i_rd_triangle_generator2 = self._sorted_triangles(self.i_rd) + + 
i_xy_triangle_cum = filter(None, (tuple( + islice(i_xy_triangle_generator2, self.triangle_package_size)) for _ in range(n))) + i_rd_triangle_cum = filter(None, (tuple( + islice(i_rd_triangle_generator2, self.triangle_package_size)) for _ in range(n))) + + for i_xy_triangles, i_rd_triangles in chain( + filter( + None, + chain(*zip_longest( # alternating chain + product(i_xy_triangle_slice, i_rd_triangle_cum), + product(i_xy_triangle_cum, i_rd_triangle_slice)))), + self._sorted_product_pairs(i_xy_triangle_slice, + i_rd_triangle_slice)): + yield np.array(i_xy_triangles), np.array(i_rd_triangles) + + def _get_triangle_angles(self, triangles): + + sidelengths = np.sqrt( + np.power(triangles[:, (1, 0, 0)] - triangles[:, (2, 2, 1)], + 2).sum(axis=2)) + + # law of cosines + angles = np.power(sidelengths[:, ((1, 2), (0, 2), (0, 1))], + 2).sum(axis=2) + angles -= np.power(sidelengths[:, (0, 1, 2)], 2) + angles /= 2 * sidelengths[:, ((1, 2), (0, 2), (0, 1))].prod(axis=2) + + return np.arccos(angles) + + def _solve_for_matrices(self, xy_triangles, rd_triangles): + + n = len(xy_triangles) + + A = xy_triangles - np.mean(xy_triangles, axis=1).reshape(n, 1, 2) + b = rd_triangles - np.mean(rd_triangles, axis=1).reshape(n, 1, 2) + + matrices = [ + np.linalg.lstsq(Ai, bi, rcond=None)[0].T for Ai, bi in zip(A, b) + ] + + return np.array(matrices) + + def _extract_parameters(self, xy_triangles, rd_triangles, matrices): + + parameters = [] + + for xy_com, rd_com, matrix in zip( # com -> center-of-mass + xy_triangles.mean(axis=1), rd_triangles.mean(axis=1), + matrices): + + cos_dec = np.cos(np.deg2rad(rd_com[1])) + coordinates = (self.bounds.xy - xy_com).dot(matrix) + coordinates = coordinates / np.array([cos_dec, 1]) + rd_com + + wcs = WCS2.from_matrix(*xy_com, *rd_com, matrix) + + parameters.append( + (*coordinates, np.log(wcs.scale), np.deg2rad(wcs.angle))) + + return parameters + + def _get_bounds_mask(self, parameters): + + i = np.ones(len(parameters), dtype=bool) + parameters = 
np.array(parameters) + + if self.bounds.radius is not None: + + i *= angular_separation( + *np.deg2rad(self.bounds.rd), + *zip(*np.deg2rad(parameters[:, (0, 1)]))) <= np.deg2rad( + self.bounds.radius) + + if self.bounds.scale is not None: + + i *= self.bounds.scale[0] <= parameters[:, 2] + i *= parameters[:, 2] <= self.bounds.scale[1] + + if self.bounds.angle is not None: + + i *= self.bounds.angle[0] <= parameters[:, 3] + i *= parameters[:, 3] <= self.bounds.angle[1] + + return i + + def __call__(self, minimum_matches=4, ratio_superiority=1, timeout=60): + '''Start the algorithm. + + Can be run multiple times with different arguments to relax the + restrictions. + + Example + -------- + cm = CoordinateMatch(xy, rd) + + lkwargs = [{ + 'minimum_matches': 20, + 'ratio_superiority': 5, + 'timeout': 10 + }, { + 'timeout': 60 + }] + + for i, kwargs in enumerate(lkwargs): + try: + i_xy, i_rd = cm(**kwargs) + except TimeoutError: + continue + except StopIteration: + print('Failed, no more stars.') + else: + print('Success with kwargs[%d].' 
% i) + else: + print('Failed, timeout.') + ''' + + self.parameters = list( + ) if self.parameters is None else self.parameters + + t0 = time.time() + + while time.time() - t0 < timeout: + + # get triangles and derive angles + + i_xy_triangles, i_rd_triangles = next( + self.triangle_package_generator) + + xy_angles = self._get_triangle_angles(self._xy[i_xy_triangles]) + rd_angles = self._get_triangle_angles(self._rd[i_rd_triangles]) + + # sort triangle vertices based on angles + + i = np.argsort(xy_angles, axis=1) + i_xy_triangles = np.take_along_axis(i_xy_triangles, i, axis=1) + xy_angles = np.take_along_axis(xy_angles, i, axis=1) + + i = np.argsort(rd_angles, axis=1) + i_rd_triangles = np.take_along_axis(i_rd_triangles, i, axis=1) + rd_angles = np.take_along_axis(rd_angles, i, axis=1) + + # match triangles + + matches = KDTree(xy_angles).query_ball_tree( + KDTree(rd_angles), r=self.maximum_angle_distance) + matches = np.array([(_i_xy, _i_rd) + for _i_xy, _li_rd in enumerate(matches) + for _i_rd in _li_rd]) + + if not len(matches): + continue + + i_xy_triangles = list(i_xy_triangles[matches[:, 0]]) + i_rd_triangles = list(i_rd_triangles[matches[:, 1]]) + + # get parameters of wcs solutions + + matrices = self._solve_for_matrices( + self._xy[np.array(i_xy_triangles)], + self._rd[np.array(i_rd_triangles)]) + + parameters = self._extract_parameters( + self.xy[np.array(i_xy_triangles)], + self.rd[np.array(i_rd_triangles)], matrices) + + # apply bounds if any + + if any([self.bounds.radius, self.bounds.scale, self.bounds.angle]): + + mask = self._get_bounds_mask(parameters) + + i_xy_triangles = np.array(i_xy_triangles)[mask].tolist() + i_rd_triangles = np.array(i_rd_triangles)[mask].tolist() + parameters = np.array(parameters)[mask].tolist() + + # normalize parameters + + normalization = [ + getattr(self.normalizations, v) + for v in ('ra', 'dec', 'scale', 'angle') + ] + normalization[0] *= np.cos(np.deg2rad(self.rd[:, 1].mean(axis=0))) + parameters = list(parameters / 
np.array(normalization)) + + # match parameters + + neighbours = KDTree(parameters).query_ball_tree( + KDTree(self.parameters + parameters), r=self.distance_factor) + neighbours = np.array([ + (i, j) for i, lj in enumerate(neighbours, len(self.parameters)) + for j in lj + ]) + neighbours = list( + neighbours[(np.diff(neighbours, axis=1) < 0).flatten()]) + + if not len(neighbours): + continue + + self.i_xy_triangles += i_xy_triangles + self.i_rd_triangles += i_rd_triangles + self.parameters += parameters + self.neighbours.add_edges_from(neighbours) + + # get largest neighborhood + + communities = list(connected_components(self.neighbours)) + c1 = np.array(list(max(communities, key=len))) + i = np.unique(np.array(self.i_xy_triangles)[c1].flatten(), + return_index=True)[1] + + if ratio_superiority > 1 and len(communities) > 1: + + communities.remove(set(c1)) + c2 = np.array(list(max(communities, key=len))) + _i = np.unique(np.array(self.i_xy_triangles)[c2].flatten()) + + if len(i) / len(_i) < ratio_superiority: + continue + + if len(i) >= minimum_matches: + break + + else: + + raise TimeoutError + + i_xy = np.array(self.i_xy_triangles)[c1].flatten()[i] + i_rd = np.array(self.i_rd_triangles)[c1].flatten()[i] + + return list(zip(i_xy, i_rd)) diff --git a/flows/coordinatematch/wcs.py b/flows/coordinatematch/wcs.py new file mode 100644 index 0000000..6d16c85 --- /dev/null +++ b/flows/coordinatematch/wcs.py @@ -0,0 +1,228 @@ +# -*- coding: utf-8 -*- +""" +WCS tools + +.. codeauthor:: Simon Holmbo +""" +from copy import deepcopy + +import numpy as np +import astropy.wcs + +from scipy.optimize import minimize +from scipy.spatial.transform import Rotation + + +class WCS2(): + '''Manipulate WCS solution. 
+ + Initialize + ---------- + wcs = WCS2(x, y, ra, dec, scale, mirror, angle) + wcs = WCS2.from_matrix(x, y, ra, dec, matrix) + wcs = WCS2.from_points(list(zip(x, y)), list(zip(ra, dec))) + wcs = WCS2.from_astropy_wcs(astropy.wcs.WCS()) + + ra, dec and angle should be in degrees + scale should be in arcsec/pixel + matrix should be the PC or CD matrix + + Examples + -------- + Adjust x, y offset: + wcs.x += delta_x + wcs.y += delta_y + + Get scale and angle: + print(wcs.scale, wcs.angle) + + Change an astropy.wcs.WCS (wcs) angle + wcs = WCS2(wcs)(angle=new_angle).astropy_wcs + + Adjust solution with points + wcs.adjust_with_points(list(zip(x, y)), list(zip(ra, dec))) + ''' + def __init__(self, x, y, ra, dec, scale, mirror, angle): + + self.x, self.y = x, y + self.ra, self.dec = ra, dec + self.scale = scale + self.mirror = mirror + self.angle = angle + + @classmethod + def from_matrix(cls, x, y, ra, dec, matrix): + '''Initiate the class with a matrix.''' + + assert np.shape(matrix) == (2, 2), \ + 'Matrix must be 2x2' + + scale, mirror, angle = cls._decompose_matrix(matrix) + + return cls(x, y, ra, dec, scale, mirror, angle) + + @classmethod + def from_points(cls, xy, rd): + '''Initiate the class with at least pixel + sky coordinates.''' + + assert np.shape(xy) == np.shape(rd) == (len(xy), 2) and len(xy) > 2, \ + 'Arguments must be lists of at least 3 sets of coordinates' + + xy, rd = np.array(xy), np.array(rd) + + x, y, ra, dec, matrix = cls._solve_from_points(xy, rd) + scale, mirror, angle = cls._decompose_matrix(matrix) + + return cls(x, y, ra, dec, scale, mirror, angle) + + @classmethod + def from_astropy_wcs(cls, astropy_wcs): + '''Initiate the class with an astropy.wcs.WCS object.''' + + assert type(astropy_wcs) is astropy.wcs.WCS, \ + 'Must be astropy.wcs.WCS' + + (x, y), (ra, dec) = astropy_wcs.wcs.crpix, astropy_wcs.wcs.crval + scale, mirror, angle = cls._decompose_matrix( + astropy_wcs.pixel_scale_matrix) + + return cls(x, y, ra, dec, scale, mirror, angle) + 
+ def adjust_with_points(self, xy, rd): + '''Adjust the WCS with pixel + sky coordinates. + + If one set is given the change will be a simple offset. + If two sets are given the offset, angle and scale will be derived. + And if more sets are given a completely new solution will be found. + ''' + + assert np.shape(xy) == np.shape(rd) == (len(xy), 2), \ + 'Arguments must be lists of sets of coordinates' + + xy, rd = np.array(xy), np.array(rd) + + self.x, self.y = xy.mean(axis=0) + self.ra, self.dec = rd.mean(axis=0) + + A, b = xy - xy.mean(axis=0), rd - rd.mean(axis=0) + b[:, 0] *= np.cos(np.deg2rad(rd[:, 1])) + + if len(xy) == 2: + + M = np.diag([[-1, 1][self.mirror], 1]) + + def R(t): + return np.array([[np.cos(t), -np.sin(t)], + [np.sin(t), np.cos(t)]]) + + def chi2(x): + return np.power( + A.dot(x[1] / 60 / 60 * R(x[0]).dot(M).T) - b, 2).sum() + self.angle, self.scale = minimize(chi2, [self.angle, self.scale]).x + + elif len(xy) > 2: + + matrix = np.linalg.lstsq(A, b, rcond=None)[0].T + self.scale, self.mirror, self.angle = self._decompose_matrix( + matrix) + + @property + def matrix(self): + + scale = self.scale / 60 / 60 + mirror = np.diag([[-1, 1][self.mirror], 1]) + angle = np.deg2rad(self.angle) + + matrix = np.array([[np.cos(angle), -np.sin(angle)], + [np.sin(angle), np.cos(angle)]]) + + return scale * matrix @ mirror + + @property + def astropy_wcs(self): + + wcs = astropy.wcs.WCS() + wcs.wcs.crpix = self.x, self.y + wcs.wcs.crval = self.ra, self.dec + wcs.wcs.pc = self.matrix + + return wcs + + @staticmethod + def _solve_from_points(xy, rd): + + (x, y), (ra, dec) = xy.mean(axis=0), rd.mean(axis=0) + + A, b = xy - xy.mean(axis=0), rd - rd.mean(axis=0) + b[:, 0] *= np.cos(np.deg2rad(rd[:, 1])) + + matrix = np.linalg.lstsq(A, b, rcond=None)[0].T + + return x, y, ra, dec, matrix + + @staticmethod + def _decompose_matrix(matrix): + + scale = np.sqrt(np.power(matrix, 2).sum() / 2) * 60 * 60 + + if np.argmax(np.power(matrix[0], 2)): + mirror = True if 
np.sign(matrix[0, 1]) != np.sign( + matrix[1, 0]) else False + else: + mirror = True if np.sign(matrix[0, 0]) == np.sign( + matrix[1, 1]) else False + + matrix = matrix if mirror else matrix.dot(np.diag([-1, 1])) + + matrix3d = np.eye(3) + matrix3d[:2, :2] = matrix / (scale / 60 / 60) + angle = Rotation.from_matrix(matrix3d).as_euler('xyz', degrees=True)[2] + + return scale, mirror, angle + + def __setattr__(self, name, value): + + if name == 'ra': + + assert 0 <= value < 360, '0 <= R.A. < 360' + + elif name == 'dec': + + assert -180 <= value <= 180, '-180 <= Dec. <= 180' + + elif name == 'scale': + + assert value > 0, 'Scale > 0' + + elif name == 'mirror': + + assert type(value) is bool, 'Mirror = True | False' + + elif name == 'angle': + + assert -180 < value <= 180, '-180 < Angle <= 180' + + super().__setattr__(name, value) + + def __call__(self, **kwargs): + '''Make a copy with, or a copy with changes.''' + + keys = ('x', 'y', 'ra', 'dec', 'scale', 'mirror', 'angle') + + if not all(k in keys for k in kwargs): + + raise Exception('unknown argument(s)') + + obj = deepcopy(self) + + for k, v in kwargs.items(): + + obj.__setattr__(k, v) + + return obj + + def __repr__(self): + + ra, dec = self.astropy_wcs.wcs_pix2world([(0, 0)], 0)[0] + + return f'WCS2(0, 0, {ra:.4f}, {dec:.4f}, {self.scale:.2f}, {self.mirror}, {self.angle:.2f})' diff --git a/flows/epsfbuilder/__init__.py b/flows/epsfbuilder/__init__.py new file mode 100644 index 0000000..dfa50fe --- /dev/null +++ b/flows/epsfbuilder/__init__.py @@ -0,0 +1 @@ +from .epsfbuilder import EPSFBuilder # noqa: F401 diff --git a/flows/epsfbuilder/epsfbuilder.py b/flows/epsfbuilder/epsfbuilder.py new file mode 100644 index 0000000..1b7bf29 --- /dev/null +++ b/flows/epsfbuilder/epsfbuilder.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +""" +Photutils hack for EPSF building + +.. 
codeauthor:: Simon Holmbo +""" +import time +import numpy as np +from scipy.interpolate import griddata +import photutils.psf + +class EPSFBuilder(photutils.psf.EPSFBuilder): + def _create_initial_epsf(self, stars): + + epsf = super()._create_initial_epsf(stars) + epsf.origin = None + + X, Y = np.meshgrid(*map(np.arange, epsf.shape[::-1])) + + X = X / epsf.oversampling[0] - epsf.x_origin + Y = Y / epsf.oversampling[1] - epsf.y_origin + + self._epsf_xy_grid = X, Y + + return epsf + + def _resample_residual(self, star, epsf): + + #max_dist = .5 / np.sqrt(np.sum(np.power(epsf.oversampling, 2))) + + #star_points = list(zip(star._xidx_centered, star._yidx_centered)) + #epsf_points = list(zip(*map(np.ravel, self._epsf_xy_grid))) + + #star_tree = cKDTree(star_points) + #dd, ii = star_tree.query(epsf_points, distance_upper_bound=max_dist) + #mask = np.isfinite(dd) + + #star_data = np.full_like(epsf.data, np.nan) + #star_data.ravel()[mask] = star._data_values_normalized[ii[mask]] + + star_points = list(zip(star._xidx_centered, star._yidx_centered)) + star_data = griddata(star_points, star._data_values_normalized, + self._epsf_xy_grid) + + return star_data - epsf._data + + def __call__(self, *args, **kwargs): + + t0 = time.time() + + epsf, stars = super().__call__(*args, **kwargs) + + epsf.fit_info = dict( + n_iter=len(self._epsf), + max_iters=self.maxiters, + time=time.time() - t0, + ) + + return epsf, stars diff --git a/flows/load_image.py b/flows/load_image.py index fd7059b..cd80436 100644 --- a/flows/load_image.py +++ b/flows/load_image.py @@ -76,6 +76,7 @@ def load_image(FILENAME): # get image and wcs solution with fits.open(FILENAME, mode='readonly') as hdul: + hdr = hdul[0].header origin = hdr.get('ORIGIN') telescope = hdr.get('TELESCOP') @@ -84,6 +85,8 @@ def load_image(FILENAME): image.image = np.asarray(hdul[0].data, dtype='float64') image.shape = image.image.shape + image.header = hdr + if origin == 'LCOGT': image.mask = np.asarray(hdul['BPM'].data, dtype='bool') 
else: diff --git a/flows/photometry.py b/flows/photometry.py index 4269c6d..719f27f 100644 --- a/flows/photometry.py +++ b/flows/photometry.py @@ -5,739 +5,835 @@ .. codeauthor:: Rasmus Handberg .. codeauthor:: Emir Karamehmetoglu +.. codeauthor:: Simon Holmbo """ - import os import numpy as np -from bottleneck import nansum, nanmedian, nanmax, allnan, replace +from bottleneck import nansum, allnan, replace +import sep from timeit import default_timer import logging import warnings -from copy import deepcopy - -from astropy.utils.exceptions import AstropyDeprecationWarning +from astropy.utils.exceptions import AstropyDeprecationWarning, AstropyUserWarning, ErfaWarning import astropy.units as u import astropy.coordinates as coords -from astropy.stats import sigma_clip, SigmaClip, gaussian_fwhm_to_sigma +from astropy.stats import sigma_clip, SigmaClip from astropy.table import Table, vstack from astropy.nddata import NDData from astropy.modeling import models, fitting -from astropy.wcs.utils import proj_plane_pixel_area +from astropy.wcs.utils import proj_plane_pixel_area, fit_wcs_from_points from astropy.time import Time warnings.simplefilter('ignore', category=AstropyDeprecationWarning) -from photutils import DAOStarFinder, CircularAperture, CircularAnnulus, aperture_photometry -from photutils.psf import EPSFBuilder, EPSFFitter, BasicPSFPhotometry, DAOGroup, extract_stars -from photutils import Background2D, SExtractorBackground -from photutils.utils import calc_total_error - -from scipy.interpolate import UnivariateSpline - -from . 
import api -from .config import load_config -from .plots import plt, plot_image -from .version import get_version -from .load_image import load_image -from .run_imagematch import run_imagematch -from .zeropoint import bootstrap_outlier, sigma_from_Chauvenet +from photutils import CircularAperture, CircularAnnulus, aperture_photometry # noqa: E402 +from photutils.psf import EPSFFitter, BasicPSFPhotometry, DAOGroup, extract_stars # noqa: E402 +from photutils import Background2D, SExtractorBackground, MedianBackground # noqa: E402 +from photutils.utils import calc_total_error # noqa: E402 + +from scipy.interpolate import UnivariateSpline # noqa: E402 + +from . import api # noqa: E402 +from .config import load_config # noqa: E402 +from .plots import plt, plot_image # noqa: E402 +from .version import get_version # noqa: E402 +from .load_image import load_image # noqa: E402 +from .run_imagematch import run_imagematch # noqa: E402 +from .zeropoint import bootstrap_outlier, sigma_from_Chauvenet # noqa: E402 +from .wcs import force_reject_g2d, clean_with_rsq_and_get_fwhm, get_clean_references # noqa: E402 +from .coordinatematch import CoordinateMatch, WCS2 # noqa: E402 +from .epsfbuilder import EPSFBuilder # noqa: E402 __version__ = get_version(pep440=False) warnings.simplefilter('ignore', category=AstropyDeprecationWarning) -#-------------------------------------------------------------------------------------------------- -def photometry(fileid, output_folder=None, attempt_imagematch=True): - """ - Run photometry. - - Parameters: - fileid (int): File ID to process. - output_folder (str, optional): Path to directory where output should be placed. - attempt_imagematch (bool, optional): If no subtracted image is available, but a - template image is, should we attempt to run ImageMatch using standard settings. - Default=True. - - .. 
codeauthor:: Rasmus Handberg - """ - - # Settings: - #ref_mag_limit = 22 # Lower limit on reference target brightness - ref_target_dist_limit = 10 * u.arcsec # Reference star must be further than this away to be included - - logger = logging.getLogger(__name__) - tic = default_timer() - - # Use local copy of archive if configured to do so: - config = load_config() - - # Get datafile dict from API: - datafile = api.get_datafile(fileid) - logger.debug("Datafile: %s", datafile) - targetid = datafile['targetid'] - target_name = datafile['target_name'] - photfilter = datafile['photfilter'] - - archive_local = config.get('photometry', 'archive_local', fallback=None) - if archive_local is not None: - datafile['archive_path'] = archive_local - if not os.path.isdir(datafile['archive_path']): - raise FileNotFoundError("ARCHIVE is not available: " + datafile['archive_path']) - - # Get the catalog containing the target and reference stars: - # TODO: Include proper-motion to the time of observation - catalog = api.get_catalog(targetid, output='table') - target = catalog['target'][0] - target_coord = coords.SkyCoord(ra=target['ra'], dec=target['decl'], unit='deg', frame='icrs') - - # Folder to save output: - if output_folder is None: - output_folder_root = config.get('photometry', 'output', fallback='.') - output_folder = os.path.join(output_folder_root, target_name, '%05d' % fileid) - logger.info("Placing output in '%s'", output_folder) - os.makedirs(output_folder, exist_ok=True) - - # The paths to the science image: - filepath = os.path.join(datafile['archive_path'], datafile['path']) - - # TODO: Download datafile using API to local drive: - # TODO: Is this a security concern? 
- #if archive_local: - # api.download_datafile(datafile, archive_local) - - # Translate photometric filter into table column: - ref_filter = { - 'up': 'u_mag', - 'gp': 'g_mag', - 'rp': 'r_mag', - 'ip': 'i_mag', - 'zp': 'z_mag', - 'B': 'B_mag', - 'V': 'V_mag', - 'J': 'J_mag', - 'H': 'H_mag', - 'K': 'K_mag', - }.get(photfilter, None) - - if ref_filter is None: - logger.warning("Could not find filter '%s' in catalogs. Using default gp filter.", photfilter) - ref_filter = 'g_mag' - - references = catalog['references'] - references.sort(ref_filter) - - # Check that there actually are reference stars in that filter: - if allnan(references[ref_filter]): - raise ValueError("No reference stars found in current photfilter.") - - # Load the image from the FITS file: - image = load_image(filepath) - - #============================================================================================== - # BARYCENTRIC CORRECTION OF TIME - #============================================================================================== - - ltt_bary = image.obstime.light_travel_time(target_coord, ephemeris='jpl') - image.obstime = image.obstime.tdb + ltt_bary - - #============================================================================================== - # BACKGROUND ESTIMATION - #============================================================================================== - - fig, ax = plt.subplots(1, 2, figsize=(20, 18)) - plot_image(image.clean, ax=ax[0], scale='log', cbar='right', title='Image') - plot_image(image.mask, ax=ax[1], scale='linear', cbar='right', title='Mask') - fig.savefig(os.path.join(output_folder, 'original.png'), bbox_inches='tight') - plt.close(fig) - - # Estimate image background: - # Not using image.clean here, since we are redefining the mask anyway - bkg = Background2D(image.clean, (128, 128), filter_size=(5, 5), - sigma_clip=SigmaClip(sigma=3.0), - bkg_estimator=SExtractorBackground(), - exclude_percentile=50.0) - image.background = bkg.background - 
image.std = bkg.background_rms_median - - # Create background-subtracted image: - image.subclean = image.clean - image.background - - # Plot background estimation: - fig, ax = plt.subplots(1, 3, figsize=(20, 6)) - plot_image(image.clean, ax=ax[0], scale='log', title='Original') - plot_image(image.background, ax=ax[1], scale='log', title='Background') - plot_image(image.subclean, ax=ax[2], scale='log', title='Background subtracted') - fig.savefig(os.path.join(output_folder, 'background.png'), bbox_inches='tight') - plt.close(fig) - - # TODO: Is this correct?! - image.error = calc_total_error(image.clean, bkg.background_rms, 1.0) - - #============================================================================================== - # DETECTION OF STARS AND MATCHING WITH CATALOG - #============================================================================================== - - # Account for proper motion: - # TODO: Are catalog RA-proper motions including cosdec? - replace(references['pm_ra'], np.NaN, 0) - replace(references['pm_dec'], np.NaN, 0) - refs_coord = coords.SkyCoord(ra=references['ra'], dec=references['decl'], - pm_ra_cosdec=references['pm_ra'], pm_dec=references['pm_dec'], - unit='deg', frame='icrs', obstime=Time(2015.5, format='decimalyear')) - - refs_coord = refs_coord.apply_space_motion(image.obstime) - - # Calculate pixel-coordinates of references: - row_col_coords = image.wcs.all_world2pix(np.array([[ref.ra.deg, ref.dec.deg] for ref in refs_coord]), 0) - references['pixel_column'] = row_col_coords[:,0] - references['pixel_row'] = row_col_coords[:,1] - - # Calculate the targets position in the image: - target_pixel_pos = image.wcs.all_world2pix([[target['ra'], target['decl']]], 0)[0] - - # Clean out the references: - hsize = 10 - x = references['pixel_column'] - y = references['pixel_row'] - references = references[(target_coord.separation(refs_coord) > ref_target_dist_limit) - & (x > hsize) & (x < (image.shape[1] - 1 - hsize)) - & (y > hsize) & (y < 
(image.shape[0] - 1 - hsize))] - # & (references[ref_filter] < ref_mag_limit) - - logger.info("References:\n%s", references) - - radius = 10 - fwhm_guess = 6.0 - fwhm_min = 3.5 - fwhm_max = 18.0 - - # Set up 2D Gaussian model for fitting to reference stars: - g2d = models.Gaussian2D(amplitude=1.0, x_mean=radius, y_mean=radius, x_stddev=fwhm_guess*gaussian_fwhm_to_sigma) - g2d.amplitude.bounds = (0.1, 2.0) - g2d.x_mean.bounds = (0.5*radius, 1.5*radius) - g2d.y_mean.bounds = (0.5*radius, 1.5*radius) - g2d.x_stddev.bounds = (fwhm_min * gaussian_fwhm_to_sigma, fwhm_max * gaussian_fwhm_to_sigma) - g2d.y_stddev.tied = lambda model: model.x_stddev - g2d.theta.fixed = True - - gfitter = fitting.LevMarLSQFitter() - - fwhms = np.full(len(references), np.NaN) - gauss_pos = np.full([len(references), 2], np.NaN) - rsqs = np.full(len(references), np.NaN) - for i, (x, y) in enumerate(zip(references['pixel_column'], references['pixel_row'])): - x = int(np.round(x)) - y = int(np.round(y)) - xmin = max(x - radius, 0) - xmax = min(x + radius + 1, image.shape[1]) - ymin = max(y - radius, 0) - ymax = min(y + radius + 1, image.shape[0]) - - curr_star = deepcopy(image.subclean[ymin:ymax, xmin:xmax]) - - edge = np.zeros_like(curr_star, dtype='bool') - edge[(0,-1),:] = True - edge[:,(0,-1)] = True - curr_star -= nanmedian(curr_star[edge]) - curr_star /= nanmax(curr_star) - - ypos, xpos = np.mgrid[:curr_star.shape[0], :curr_star.shape[1]] - gfit = gfitter(g2d, x=xpos, y=ypos, z=curr_star) - - fwhms[i] = gfit.x_fwhm - gauss_pos[i, :] = [xmin + gfit.x_mean, ymin + gfit.y_mean] - - # Calculate rsq - sstot = ((curr_star - curr_star.mean()) ** 2).sum() - sserr = (gfitter.fit_info['fvec'] ** 2).sum() - rsqs[i] = 1.0 - sserr/sstot - - masked_fwhms = np.ma.masked_array(fwhms, ~np.isfinite(fwhms)) - masked_rsqs = np.ma.masked_array(rsqs, ~np.isfinite(rsqs)) - - # Create plot of target and reference star positions from 2D Gaussian fits. 
- fig, ax = plt.subplots(1, 1, figsize=(20, 18)) - plot_image(image.subclean, ax=ax, scale='log', cbar='right', title=target_name) - ax.scatter(gauss_pos[:, 0], gauss_pos[:, 1], c='r', marker='o', alpha=0.6, label='2D Gauss') - ax.scatter(references['pixel_column'], references['pixel_row'], c='g', marker='o', alpha=0.6, label='References') - ax.scatter(target_pixel_pos[0], target_pixel_pos[1], marker='+', s=20, c='r', label='Target') - ax.legend() - fig.savefig(os.path.join(output_folder, 'positions_2dgauss.png'), bbox_inches='tight') - plt.close(fig) - - # Use R^2 to more robustly determine initial FWHM guess. - # This cleaning is good when we have FEW references. - min_fwhm_references = 2 - min_references = 6 - min_references_now = min_references - rsq_min = 0.15 - rsqvals = np.arange(0.95, rsq_min, -0.15) - fwhm_found = False - min_references_achieved = False - - # Clean based on R^2 Value - while not min_references_achieved: - for rsqval in rsqvals: - mask = (masked_rsqs >= rsqval) & (masked_rsqs < 1.0) - nreferences = np.sum(np.isfinite(masked_fwhms[mask])) - if nreferences >= min_fwhm_references: - _fwhms_cut_ = np.mean(sigma_clip(masked_fwhms[mask], maxiters=100, sigma=2.0)) - logger.debug('R^2 >= %.2f: %d stars with mean FWHM = %.2f', - rsqval, nreferences, _fwhms_cut_) - if not fwhm_found: - fwhm = _fwhms_cut_ - fwhm_found = True - if nreferences >= min_references_now: - references = references[mask] - min_references_achieved = True - break - - if min_references_achieved: - break - min_references_now -= 2 - if fwhm_found and min_references_now < 2: - break - elif not fwhm_found: - raise Exception("Could not estimate FWHM") - logger.debug('%d, %d, #refs = %d', min_references_now, min_fwhm_references, nreferences) - - logger.info("FWHM: %f", fwhm) - if np.isnan(fwhm): - raise Exception("Could not estimate FWHM") - - # if minimum references not found, then take what we can get with even a weaker cut. 
- # TODO: Is this right, or should we grab rsq_min (or even weaker?) - min_references_now = min_references - 2 - while not min_references_achieved: - mask = (masked_rsqs >= rsq_min) & (masked_rsqs < 1.0) - nreferences = np.sum(np.isfinite(masked_fwhms[mask])) - if nreferences >= min_references_now: - references = references[mask] - min_references_achieved = True - rsq_min -= 0.07 - min_references_now -= 1 - - # Check len of references as this is a destructive cleaning. - if len(references) == 2: - logger.warning('Only two reference stars remaining. Check WCS and image quality.') - elif len(references) < 2: - raise Exception("{0} References remaining; could not clean.".format(len(references))) - - # This cleaning is good when we have MANY references. - # Use DAOStarFinder to search the image for stars, and only use reference-stars where a - # star was actually detected close to the references-star coordinate: - cleanout_references = (len(references) > 6) - logger.debug("Number of references before cleaning: %d", len(references)) - if cleanout_references: - # Arguments for DAOStarFind, starting with the strictest and ending with the - # least strict settings to try: - # We will stop with the first set that yield more than the minimum number - # of reference stars. 
- daofind_args = [{ - 'threshold': 7 * image.std, - 'fwhm': fwhm, - 'exclude_border': True, - 'sharphi': 0.8, - 'sigma_radius': 1.1, - 'peakmax': image.peakmax - }, { - 'threshold': 3 * image.std, - 'fwhm': fwhm, - 'roundlo': -0.5, - 'roundhi': 0.5 - }] - - # Loop through argument sets for DAOStarFind: - for kwargs in daofind_args: - # Run DAOStarFind with the given arguments: - daofind_tbl = DAOStarFinder(**kwargs).find_stars(image.subclean, mask=image.mask) - - # Match the found stars with the catalog references: - indx_good = np.zeros(len(references), dtype='bool') - for k, ref in enumerate(references): - dist = np.sqrt( (daofind_tbl['xcentroid'] - ref['pixel_column'])**2 + (daofind_tbl['ycentroid'] - ref['pixel_row'])**2 ) - if np.any(dist <= fwhm/4): # Cutoff set somewhat arbitrary - indx_good[k] = True - - logger.debug("Number of references after cleaning: %d", np.sum(indx_good)) - if np.sum(indx_good) >= min_references: - references = references[indx_good] - break - - logger.debug("Number of references after cleaning: %d", len(references)) - - # Create plot of target and reference star positions: - fig, ax = plt.subplots(1, 1, figsize=(20, 18)) - plot_image(image.subclean, ax=ax, scale='log', cbar='right', title=target_name) - ax.scatter(references['pixel_column'], references['pixel_row'], c='r', marker='o', alpha=0.6) - if cleanout_references: - ax.scatter(daofind_tbl['xcentroid'], daofind_tbl['ycentroid'], c='g', marker='o', alpha=0.6) - ax.scatter(target_pixel_pos[0], target_pixel_pos[1], marker='+', s=20, c='r') - fig.savefig(os.path.join(output_folder, 'positions.png'), bbox_inches='tight') - plt.close(fig) - - #============================================================================================== - # CREATE EFFECTIVE PSF MODEL - #============================================================================================== - - # Make cutouts of stars using extract_stars: - # Scales with FWHM - size = int(np.round(29*fwhm/6)) - if size % 2 == 0: 
- size += 1 # Make sure it's a uneven number - size = max(size, 15) # Never go below 15 pixels - hsize = (size - 1) / 2 - - x = references['pixel_column'] - y = references['pixel_row'] - mask_near_edge = ((x > hsize) & (x < (image.shape[1] - 1 - hsize)) - & (y > hsize) & (y < (image.shape[0] - 1 - hsize))) - - stars_for_epsf = Table() - stars_for_epsf['x'] = x[mask_near_edge] - stars_for_epsf['y'] = y[mask_near_edge] - - # Store which stars were used in ePSF in the table: - logger.info("Number of stars used for ePSF: %d", len(stars_for_epsf)) - references['used_for_epsf'] = mask_near_edge - - # Extract stars sub-images: - stars = extract_stars( - NDData(data=image.subclean, mask=image.mask), - stars_for_epsf, - size=size - ) - - # Plot the stars being used for ePSF: - nrows = 5 - ncols = 5 - imgnr = 0 - for k in range(int(np.ceil(len(stars_for_epsf)/(nrows*ncols)))): - fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(20, 20), squeeze=True) - ax = ax.ravel() - for i in range(nrows*ncols): - if imgnr > len(stars_for_epsf)-1: - ax[i].axis('off') - else: - plot_image(stars[imgnr], ax=ax[i], scale='log', cmap='viridis') - imgnr += 1 - - fig.savefig(os.path.join(output_folder, 'epsf_stars%02d.png' % (k+1)), bbox_inches='tight') - plt.close(fig) - - # Build the ePSF: - epsf = EPSFBuilder( - oversampling=1.0, - maxiters=500, - fitter=EPSFFitter(fit_boxsize=2*fwhm), - progress_bar=True - )(stars)[0] - - logger.info('Successfully built PSF model') - - fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 15)) - plot_image(epsf.data, ax=ax1, cmap='viridis') - - fwhms = [] - bad_epsf_detected = False - for a, ax in ((0, ax3), (1, ax2)): - # Collapse the PDF along this axis: - profile = epsf.data.sum(axis=a) - itop = profile.argmax() - poffset = profile[itop]/2 - - # Run a spline through the points, but subtract half of the peak value, and find the roots: - # We have to use a cubic spline, since roots() is not supported for other splines - # for some reason - 
profile_intp = UnivariateSpline(np.arange(0, len(profile)), profile - poffset, k=3, s=0, ext=3) - lr = profile_intp.roots() - - # Plot the profile and spline: - x_fine = np.linspace(-0.5, len(profile)-0.5, 500) - ax.plot(profile, 'k.-') - ax.plot(x_fine, profile_intp(x_fine) + poffset, 'g-') - ax.axvline(itop) - ax.set_xlim(-0.5, len(profile)-0.5) - - # Do some sanity checks on the ePSF: - # It should pass 50% exactly twice and have the maximum inside that region. - # I.e. it should be a single gaussian-like peak - if len(lr) != 2 or itop < lr[0] or itop > lr[1]: - logger.error("Bad PSF along axis %d", a) - bad_epsf_detected = True - else: - axis_fwhm = lr[1] - lr[0] - fwhms.append(axis_fwhm) - ax.axvspan(lr[0], lr[1], facecolor='g', alpha=0.2) - - # Save the ePSF figure: - ax4.axis('off') - fig.savefig(os.path.join(output_folder, 'epsf.png'), bbox_inches='tight') - plt.close(fig) - - # There was a problem with the ePSF: - if bad_epsf_detected: - raise Exception("Bad ePSF detected.") - - # Let's make the final FWHM the largest one we found: - fwhm = np.max(fwhms) - logger.info("Final FWHM based on ePSF: %f", fwhm) - - #============================================================================================== - # COORDINATES TO DO PHOTOMETRY AT - #============================================================================================== - - coordinates = np.array([[ref['pixel_column'], ref['pixel_row']] for ref in references]) - - # Add the main target position as the first entry for doing photometry directly in the - # science image: - coordinates = np.concatenate(([target_pixel_pos], coordinates), axis=0) - - #============================================================================================== - # APERTURE PHOTOMETRY - #============================================================================================== - - # Define apertures for aperture photometry: - apertures = CircularAperture(coordinates, r=fwhm) - annuli = 
CircularAnnulus(coordinates, r_in=1.5*fwhm, r_out=2.5*fwhm) - - apphot_tbl = aperture_photometry(image.subclean, [apertures, annuli], mask=image.mask, error=image.error) - - logger.debug("Aperture Photometry Table:\n%s", apphot_tbl) - logger.info('Apperature Photometry Success') - - #============================================================================================== - # PSF PHOTOMETRY - #============================================================================================== - - # Are we fixing the postions? - epsf.fixed.update({'x_0': False, 'y_0': False}) - - # Create photometry object: - photometry = BasicPSFPhotometry( - group_maker=DAOGroup(fwhm), - bkg_estimator=SExtractorBackground(), - psf_model=epsf, - fitter=fitting.LevMarLSQFitter(), - fitshape=size, - aperture_radius=fwhm - ) - - psfphot_tbl = photometry( - image=image.subclean, - init_guesses=Table(coordinates, names=['x_0', 'y_0']) - ) - - logger.debug("PSF Photometry Table:\n%s", psfphot_tbl) - logger.info('PSF Photometry Success') - - #============================================================================================== - # TEMPLATE SUBTRACTION AND TARGET PHOTOMETRY - #============================================================================================== - - # Find the pixel-scale of the science image: - pixel_area = proj_plane_pixel_area(image.wcs.celestial) - pixel_scale = np.sqrt(pixel_area)*3600 # arcsec/pixel - #print(image.wcs.celestial.cunit) % Doesn't work? 
- logger.info("Science image pixel scale: %f", pixel_scale) - - diffimage = None - if datafile.get('diffimg') is not None: - - diffimg_path = os.path.join(datafile['archive_path'], datafile['diffimg']['path']) - diffimage = load_image(diffimg_path) - diffimage = diffimage.image - - elif attempt_imagematch and datafile.get('template') is not None: - # Run the template subtraction, and get back - # the science image where the template has been subtracted: - diffimage = run_imagematch(datafile, target, star_coord=coordinates, fwhm=fwhm, pixel_scale=pixel_scale) - - # We have a diff image, so let's do photometry of the target using this: - if diffimage is not None: - # Include mask from original image: - diffimage = np.ma.masked_array(diffimage, image.mask) - - # Create apertures around the target: - apertures = CircularAperture(target_pixel_pos, r=fwhm) - annuli = CircularAnnulus(target_pixel_pos, r_in=1.5*fwhm, r_out=2.5*fwhm) - - # Create two plots of the difference image: - fig, ax = plt.subplots(1, 1, squeeze=True, figsize=(20, 20)) - plot_image(diffimage, ax=ax, cbar='right', title=target_name) - ax.plot(target_pixel_pos[0], target_pixel_pos[1], marker='+', color='r') - fig.savefig(os.path.join(output_folder, 'diffimg.png'), bbox_inches='tight') - apertures.plot(color='r') - annuli.plot(color='k') - ax.set_xlim(target_pixel_pos[0]-50, target_pixel_pos[0]+50) - ax.set_ylim(target_pixel_pos[1]-50, target_pixel_pos[1]+50) - fig.savefig(os.path.join(output_folder, 'diffimg_zoom.png'), bbox_inches='tight') - plt.close(fig) - - # Run aperture photometry on subtracted image: - target_apphot_tbl = aperture_photometry(diffimage, [apertures, annuli], mask=image.mask, error=image.error) - - # Run PSF photometry on template subtracted image: - target_psfphot_tbl = photometry( - diffimage, - init_guesses=Table(target_pixel_pos, names=['x_0', 'y_0']) - ) - - # Combine the output tables from the target and the reference stars into one: - apphot_tbl = vstack([target_apphot_tbl, 
apphot_tbl], join_type='exact') - psfphot_tbl = vstack([target_psfphot_tbl, psfphot_tbl], join_type='exact') - - # Build results table: - tab = references.copy() - tab.insert_row(0, {'starid': 0, 'ra': target['ra'], 'decl': target['decl'], 'pixel_column': target_pixel_pos[0], 'pixel_row': target_pixel_pos[1]}) - if diffimage is not None: - tab.insert_row(0, {'starid': -1, 'ra': target['ra'], 'decl': target['decl'], 'pixel_column': target_pixel_pos[0], 'pixel_row': target_pixel_pos[1]}) - indx_main_target = (tab['starid'] <= 0) - for key in ('pm_ra', 'pm_dec', 'gaia_mag', 'gaia_bp_mag', 'gaia_rp_mag', 'B_mag', 'V_mag', 'H_mag','J_mag','K_mag', 'u_mag', 'g_mag', 'r_mag', 'i_mag', 'z_mag'): - for i in np.where(indx_main_target)[0]: # No idea why this is needed, but giving a boolean array as slice doesn't work - tab[i][key] = np.NaN - - # Subtract background estimated from annuli: - flux_aperture = apphot_tbl['aperture_sum_0'] - (apphot_tbl['aperture_sum_1'] / annuli.area()) * apertures.area() - flux_aperture_error = np.sqrt(apphot_tbl['aperture_sum_err_0']**2 + (apphot_tbl['aperture_sum_err_1']/annuli.area() * apertures.area())**2) - - # Add table columns with results: - tab['flux_aperture'] = flux_aperture/image.exptime - tab['flux_aperture_error'] = flux_aperture_error/image.exptime - tab['flux_psf'] = psfphot_tbl['flux_fit']/image.exptime - tab['flux_psf_error'] = psfphot_tbl['flux_unc']/image.exptime - tab['pixel_column_psf_fit'] = psfphot_tbl['x_fit'] - tab['pixel_row_psf_fit'] = psfphot_tbl['y_fit'] - tab['pixel_column_psf_fit_error'] = psfphot_tbl['x_0_unc'] - tab['pixel_row_psf_fit_error'] = psfphot_tbl['y_0_unc'] - - # Check that we got valid photometry: - if np.any(~np.isfinite(tab[indx_main_target]['flux_psf'])) or np.any(~np.isfinite(tab[indx_main_target]['flux_psf_error'])): - raise Exception("Target magnitude is undefined.") - - #============================================================================================== - # CALIBRATE - 
#============================================================================================== - - # Convert PSF fluxes to magnitudes: - mag_inst = -2.5 * np.log10(tab['flux_psf']) - mag_inst_err = (2.5/np.log(10)) * (tab['flux_psf_error'] / tab['flux_psf']) - - # Corresponding magnitudes in catalog: - #TODO: add color terms here - mag_catalog = tab[ref_filter] - - # Mask out things that should not be used in calibration: - use_for_calibration = np.ones_like(mag_catalog, dtype='bool') - use_for_calibration[indx_main_target] = False # Do not use target for calibration - use_for_calibration[~np.isfinite(mag_inst) | ~np.isfinite(mag_catalog)] = False - - # Just creating some short-hands: - x = mag_catalog[use_for_calibration] - y = mag_inst[use_for_calibration] - yerr = mag_inst_err[use_for_calibration] - weights = 1.0/yerr**2 - - # Fit linear function with fixed slope, using sigma-clipping: - model = models.Linear1D(slope=1, fixed={'slope': True}) - fitter = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(), sigma_clip, sigma=3.0) - best_fit, sigma_clipped = fitter(model, x, y, weights=weights) - - # Extract zero-point and estimate its error using a single weighted fit: - # I don't know why there is not an error-estimate attached directly to the Parameter? - zp = -1*best_fit.intercept.value # Negative, because that is the way zeropoints are usually defined - - weights[sigma_clipped] = 0 # Trick to make following expression simpler - N = len(weights.nonzero()[0]) - if N > 1: - zp_error = np.sqrt( N * nansum(weights*(y - best_fit(x))**2) / nansum(weights) / (N-1) ) - else: - zp_error = np.NaN - logger.info('Leastsquare ZP = %.3f, ZP_error = %.3f', zp, zp_error) - - # Determine sigma clipping sigma according to Chauvenet method - # But don't allow less than sigma = sigmamin, setting to 1.5 for now. - # Should maybe be 2? 
- sigmamin = 1.5 - sigChauv = sigma_from_Chauvenet(len(x)) - sigChauv = sigChauv if sigChauv >= sigmamin else sigmamin - - # Extract zero point and error using bootstrap method - Nboot = 1000 - logger.info('Running bootstrap with sigma = %.2f and n = %d', sigChauv, Nboot) - pars = bootstrap_outlier(x, y, yerr, n=Nboot, model=model, fitter=fitting.LinearLSQFitter, - outlier=sigma_clip, outlier_kwargs={'sigma':sigChauv}, summary='median', - error='bootstrap', return_vals=False) - - zp_bs = pars['intercept'] * -1.0 - zp_error_bs = pars['intercept_error'] - - logger.info('Bootstrapped ZP = %.3f, ZP_error = %.3f', zp_bs, zp_error_bs) - - # Check that difference is not large - zp_diff = 0.4 - if np.abs(zp_bs - zp) >= zp_diff: - logger.warning("Bootstrap and weighted LSQ ZPs differ by %.2f, \ - which is more than the allowed %.2f mag.", np.abs(zp_bs - zp), zp_diff) - - # Add calibrated magnitudes to the photometry table: - tab['mag'] = mag_inst + zp_bs - tab['mag_error'] = np.sqrt(mag_inst_err**2 + zp_error_bs**2) - - fig, ax = plt.subplots(1, 1) - ax.errorbar(x, y, yerr=yerr, fmt='k.') - ax.scatter(x[sigma_clipped], y[sigma_clipped], marker='x', c='r') - ax.plot(x, best_fit(x), color='g', linewidth=3) - ax.set_xlabel('Catalog magnitude') - ax.set_ylabel('Instrumental magnitude') - fig.savefig(os.path.join(output_folder, 'calibration.png'), bbox_inches='tight') - plt.close(fig) - - # Check that we got valid photometry: - if not np.isfinite(tab[0]['mag']) or not np.isfinite(tab[0]['mag_error']): - raise Exception("Target magnitude is undefined.") - - #============================================================================================== - # SAVE PHOTOMETRY - #============================================================================================== - - # Descriptions of columns: - tab['flux_aperture'].unit = u.count/u.second - tab['flux_aperture_error'].unit = u.count/u.second - tab['flux_psf'].unit = u.count/u.second - tab['flux_psf_error'].unit = 
u.count/u.second - tab['pixel_column'].unit = u.pixel - tab['pixel_row'].unit = u.pixel - tab['pixel_column_psf_fit'].unit = u.pixel - tab['pixel_row_psf_fit'].unit = u.pixel - tab['pixel_column_psf_fit_error'].unit = u.pixel - tab['pixel_row_psf_fit_error'].unit = u.pixel - - # Meta-data: - tab.meta['version'] = __version__ - tab.meta['fileid'] = fileid - tab.meta['template'] = None if datafile.get('template') is None else datafile['template']['fileid'] - tab.meta['diffimg'] = None if datafile.get('diffimg') is None else datafile['diffimg']['fileid'] - tab.meta['photfilter'] = photfilter - tab.meta['fwhm'] = fwhm * u.pixel - tab.meta['pixel_scale'] = pixel_scale * u.arcsec/u.pixel - tab.meta['seeing'] = (fwhm*pixel_scale) * u.arcsec - tab.meta['obstime-bmjd'] = float(image.obstime.mjd) - tab.meta['zp'] = zp_bs - tab.meta['zp_error'] = zp_error_bs - tab.meta['zp_diff'] = np.abs(zp_bs - zp) - tab.meta['zp_error_weights'] = zp_error - - # Filepath where to save photometry: - photometry_output = os.path.join(output_folder, 'photometry.ecsv') - - # Write the final table to file: - tab.write(photometry_output, format='ascii.ecsv', delimiter=',', overwrite=True) - - toc = default_timer() - - logger.info("------------------------------------------------------") - logger.info("Success!") - logger.info("Main target: %f +/- %f", tab[0]['mag'], tab[0]['mag_error']) - logger.info("Photometry took: %f seconds", toc-tic) - - return photometry_output + +# -------------------------------------------------------------------------------------------------- +def photometry(fileid, + output_folder=None, + attempt_imagematch=True, + keep_diff_fixed=False, + timeoutpar='None'): + """ + Run photometry. + + Parameters: + fileid (int): File ID to process. + output_folder (str, optional): Path to directory where output should be placed. 
+ attempt_imagematch (bool, optional): If no subtracted image is available, but a + template image is, should we attempt to run ImageMatch using standard settings. + Default=True. + keep_diff_fixed (bool, optional): Whether to allow psf photometry to recenter when + calculating the flux for the difference image. Setting to true can help if diff + image has non-source flux in the region around the SN. This will also force + using Median background instead of Sextractor for the diff image PSF photometry. + .. codeauthor:: Rasmus Handberg + .. codeauthor:: Emir Karamehmetoglu + .. codeauthor:: Simon Holmbo + """ + + # Settings: + # ref_mag_limit = 22 # Lower limit on reference target brightness + ref_target_dist_limit = 10 * u.arcsec # Reference star must be further than this away to be included + + logger = logging.getLogger(__name__) + tic = default_timer() + + # Use local copy of archive if configured to do so: + config = load_config() + + # Get datafile dict from API: + datafile = api.get_datafile(fileid) + logger.debug("Datafile: %s", datafile) + targetid = datafile['targetid'] + target_name = datafile['target_name'] + photfilter = datafile['photfilter'] + + archive_local = config.get('photometry', 'archive_local', fallback=None) + if archive_local is not None: + datafile['archive_path'] = archive_local + if not os.path.isdir(datafile['archive_path']): + raise FileNotFoundError("ARCHIVE is not available: " + datafile['archive_path']) + + # Get the catalog containing the target and reference stars: + # TODO: Include proper-motion to the time of observation + catalog = api.get_catalog(targetid, output='table') + target = catalog['target'][0] + target_coord = coords.SkyCoord(ra=target['ra'], + dec=target['decl'], + unit='deg', + frame='icrs') + + # Folder to save output: + if output_folder is None: + output_folder_root = config.get('photometry', 'output', fallback='.') + output_folder = os.path.join(output_folder_root, target_name, + '%05d' % fileid) + 
logger.info("Placing output in '%s'", output_folder) + os.makedirs(output_folder, exist_ok=True) + + # The paths to the science image: + filepath = os.path.join(datafile['archive_path'], datafile['path']) + + # TODO: Download datafile using API to local drive: + # TODO: Is this a security concern? + # if archive_local: + # api.download_datafile(datafile, archive_local) + + # Translate photometric filter into table column: + ref_filter = { + 'up': 'u_mag', + 'gp': 'g_mag', + 'rp': 'r_mag', + 'ip': 'i_mag', + 'zp': 'z_mag', + 'B': 'B_mag', + 'V': 'V_mag', + #'Y': 'Y_mag', + 'J': 'J_mag', + 'H': 'H_mag', + 'K': 'K_mag', + }.get(photfilter, None) + + if ref_filter is None: + logger.warning( + "Could not find filter '%s' in catalogs. Using default gp filter.", + photfilter) + ref_filter = 'g_mag' + + # Load the image from the FITS file: + logger.info("Load image '%s'", filepath) + image = load_image(filepath) + + references = catalog['references'] + references.sort(ref_filter) + + # Check that there actually are reference stars in that filter: + if allnan(references[ref_filter]): + raise ValueError("No reference stars found in current photfilter.") + + # ============================================================================================== + # BARYCENTRIC CORRECTION OF TIME + # ============================================================================================== + + ltt_bary = image.obstime.light_travel_time(target_coord, ephemeris='jpl') + image.obstime = image.obstime.tdb + ltt_bary + + # ============================================================================================== + # BACKGROUND ESTIMATION + # ============================================================================================== + + fig, ax = plt.subplots(1, 2, figsize=(20, 18)) + plot_image(image.clean, ax=ax[0], scale='log', cbar='right', title='Image') + plot_image(image.mask, + ax=ax[1], + scale='linear', + cbar='right', + title='Mask') + 
fig.savefig(os.path.join(output_folder, 'original.png'), + bbox_inches='tight') + plt.close(fig) + + # Estimate image background: + # Not using image.clean here, since we are redefining the mask anyway + background = Background2D(image.clean, (128, 128), + filter_size=(5, 5), + sigma_clip=SigmaClip(sigma=3.0), + bkg_estimator=SExtractorBackground(), + exclude_percentile=50.0) + + # Create background-subtracted image: + image.subclean = image.clean - background.background + + # Plot background estimation: + fig, ax = plt.subplots(1, 3, figsize=(20, 6)) + plot_image(image.clean, ax=ax[0], scale='log', title='Original') + plot_image(background.background, + ax=ax[1], + scale='log', + title='Background') + plot_image(image.subclean, + ax=ax[2], + scale='log', + title='Background subtracted') + fig.savefig(os.path.join(output_folder, 'background.png'), + bbox_inches='tight') + plt.close(fig) + + # TODO: Is this correct?! + image.error = calc_total_error(image.clean, background.background_rms, 1.0) + + # Use sep to for soure extraction + sep_background = sep.Background(image.clean.data, mask=image.mask) + msg = 'sub: {} bkg_rms: {} mask: {}'.format(image.shape, np.shape(sep_background.globalrms), image.shape) + logger.debug(msg) + objects = sep.extract(image.clean.data - sep_background, + thresh=5., + err=sep_background.globalrms, + mask=image.mask, + deblend_cont=0.1, + minarea=9, + clean_param=2.0) + + # ============================================================================================== + # DETECTION OF STARS AND MATCHING WITH CATALOG + # ============================================================================================== + + # Account for proper motion: + # TODO: Are catalog RA-proper motions including cosdec? 
+ replace(references['pm_ra'], np.NaN, 0) + replace(references['pm_dec'], np.NaN, 0) + refs_coord = coords.SkyCoord(ra=references['ra'], + dec=references['decl'], + pm_ra_cosdec=references['pm_ra'], + pm_dec=references['pm_dec'], + unit='deg', + frame='icrs', + obstime=Time(2015.5, format='decimalyear')) + + with warnings.catch_warnings(): + warnings.simplefilter("ignore", ErfaWarning) + refs_coord = refs_coord.apply_space_motion(image.obstime) + + # @TODO: These need to be based on the instrument! + radius = 10 + fwhm_guess = 6.0 + fwhm_min = 3.5 + fwhm_max = 18.0 + + # Clean extracted stars + masked_sep_xy, sep_mask, masked_sep_rsqs = force_reject_g2d( + objects['x'], + objects['y'], + image, + get_fwhm=False, + radius=radius, + fwhm_guess=fwhm_guess, + rsq_min=0.3, + fwhm_max=fwhm_max, + fwhm_min=fwhm_min) + + head_wcs = str(WCS2.from_astropy_wcs(image.wcs)) + logger.debug('Head WCS: %s', head_wcs) + references.meta['head_wcs'] = head_wcs + + # Solve for new WCS + cm = CoordinateMatch( + xy=list(masked_sep_xy[sep_mask]), + rd=list(zip(refs_coord.ra.deg, refs_coord.dec.deg)), + xy_order=np.argsort( + np.power(masked_sep_xy[sep_mask] - np.array(image.shape[::-1]) / 2, + 2).sum(axis=1)), + rd_order=np.argsort(target_coord.separation(refs_coord)), + xy_nmax=100, + rd_nmax=100, + maximum_angle_distance=0.002, + ) + + # Set timeout par to infinity unless specified. 
+ if timeoutpar == 'None': timeoutpar = float('inf') + try: + i_xy, i_rd = map(np.array, zip(*cm(5, 1.5, timeout=timeoutpar))) + except TimeoutError: + logger.warning('TimeoutError: No new WCS solution found') + except StopIteration: + logger.warning('StopIterationError: No new WCS solution found') + else: + logger.info('Found new WCS') + image.wcs = fit_wcs_from_points( + np.array(list(zip(*cm.xy[i_xy]))), + coords.SkyCoord(*map(list, zip(*cm.rd[i_rd])), unit='deg')) + + used_wcs = str(WCS2.from_astropy_wcs(image.wcs)) + logger.debug('Used WCS: %s', used_wcs) + references.meta['used_wcs'] = used_wcs + + # Calculate pixel-coordinates of references: + xy = image.wcs.all_world2pix( + list(zip(refs_coord.ra.deg, refs_coord.dec.deg)), 0) + references['pixel_column'], references['pixel_row'] = x, y = list( + map(np.array, zip(*xy))) + + # Clean out the references: + hsize = 10 + clean_references = references[ + (target_coord.separation(refs_coord) > ref_target_dist_limit) + & (x > hsize) & (x < (image.shape[1] - 1 - hsize)) + & (y > hsize) & (y < (image.shape[0] - 1 - hsize))] + # & (references[ref_filter] < ref_mag_limit) + assert len(clean_references), 'No clean references in field' + + # Calculate the targets position in the image: + target_pixel_pos = image.wcs.all_world2pix( + [(target['ra'], target['decl'])], 0)[0] + + # Clean reference star locations + masked_fwhms, masked_ref_xys, rsq_mask, masked_rsqs = force_reject_g2d( + clean_references['pixel_column'], + clean_references['pixel_row'], + image, + get_fwhm=True, + radius=radius, + fwhm_guess=fwhm_guess, + fwhm_max=fwhm_max, + fwhm_min=fwhm_min, + rsq_min=0.15) + + # Use R^2 to more robustly determine initial FWHM guess. + # This cleaning is good when we have FEW references. 
+ fwhm, fwhm_clean_references = clean_with_rsq_and_get_fwhm( + masked_fwhms, + masked_rsqs, + clean_references, + min_fwhm_references=2, + min_references=6, + rsq_min=0.15) + msg = 'Initial FWHM guess is {} pixels'.format(fwhm) + logger.info(msg) + image.fwhm = fwhm + + # Create plot of target and reference star positions from 2D Gaussian fits. + fig, ax = plt.subplots(1, 1, figsize=(20, 18)) + plot_image(image.subclean, + ax=ax, + scale='log', + cbar='right', + title=target_name) + ax.scatter(fwhm_clean_references['pixel_column'], + fwhm_clean_references['pixel_row'], + c='r', + marker='o', + alpha=0.3) + ax.scatter(masked_sep_xy[:, 0], + masked_sep_xy[:, 1], + marker='s', + alpha=1.0, + edgecolors='green', + facecolors='none') + ax.scatter(target_pixel_pos[0], + target_pixel_pos[1], + marker='+', + s=20, + c='r') + fig.savefig(os.path.join(output_folder, 'positions_g2d.png'), + bbox_inches='tight') + plt.close(fig) + + # Uncomment For Debugging + # return references, clean_references, masked_rsqs, rsq_mask + + # Final clean of wcs corrected references + logger.info("Number of references before final cleaning: %d", len(clean_references)) + msg = 'masked R^2 values: {}'.format(masked_rsqs[rsq_mask]) + logger.debug(msg) + references = get_clean_references(clean_references, + masked_rsqs, + rsq_ideal=0.8) + logger.info("Number of references after final cleaning: %d", + len(references)) + + # Create plot of target and reference star positions: + fig, ax = plt.subplots(1, 1, figsize=(20, 18)) + plot_image(image.subclean, + ax=ax, + scale='log', + cbar='right', + title=target_name) + ax.scatter(references['pixel_column'], + references['pixel_row'], + c='r', + marker='o', + alpha=0.6) + ax.scatter(masked_sep_xy[:, 0], + masked_sep_xy[:, 1], + marker='s', + alpha=0.6, + edgecolors='green', + facecolors='none') + ax.scatter(target_pixel_pos[0], + target_pixel_pos[1], + marker='+', + s=20, + c='r') + fig.savefig(os.path.join(output_folder, 'positions.png'), + 
bbox_inches='tight') + plt.close(fig) + + # ============================================================================================== + # CREATE EFFECTIVE PSF MODEL + # ============================================================================================== + + # Make cutouts of stars using extract_stars: + # Scales with FWHM + size = int(np.round(29 * fwhm / 6)) + size += 0 if size % 2 else 1 # Make sure it's a uneven number + size = max(size, 15) # Never go below 15 pixels + + # Extract stars sub-images: + xy = [tuple(masked_ref_xys[clean_references['starid'] == ref['starid']].data[0]) for ref in references] # FIXME + with warnings.catch_warnings(): + warnings.simplefilter('ignore', AstropyUserWarning) + stars = extract_stars( + NDData(data=image.subclean.data, mask=image.mask), + Table(np.array(xy), names=('x', 'y')), + size=size + 6 # +6 for edge buffer + ) + + # Store which stars were used in ePSF in the table: + references['used_for_epsf'] = False + references['used_for_epsf'][[star.id_label - 1 for star in stars]] = True + logger.info("Number of stars used for ePSF: %d", len(stars)) + + # Plot the stars being used for ePSF: + imgnr = 0 + nrows, ncols = 5, 5 + for k in range(int(np.ceil(len(stars) / (nrows * ncols)))): + fig, ax = plt.subplots(nrows=nrows, + ncols=ncols, + figsize=(20, 20), + squeeze=True) + ax = ax.ravel() + for i in range(nrows * ncols): + if imgnr > len(stars) - 1: + ax[i].axis('off') + else: + #offset_axes = stars[imgnr].bbox.ixmin, stars[imgnr].bbox.iymin + plot_image(stars[imgnr], ax=ax[i], scale='log', cmap='viridis') # , offset_axes=offset_axes) FIXME (no x-ticks) + imgnr += 1 + + fig.savefig(os.path.join(output_folder, + 'epsf_stars%02d.png' % (k + 1)), + bbox_inches='tight') + plt.close(fig) + + # Build the ePSF: + epsf, stars = EPSFBuilder( + oversampling=1, + shape=1 * size, + fitter=EPSFFitter(fit_boxsize=max(np.round(1.5 * fwhm).astype(int), 5)), + recentering_boxsize=max(np.round(2 * fwhm).astype(int), 5), + 
norm_radius=max(fwhm, 5), + maxiters=100, + )(stars) + msg = 'Built PSF model ({n_iter}/{max_iters}) in {time:.1f}s'.format(**epsf.fit_info) + logger.info(msg) + + fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 15)) + plot_image(epsf.data, ax=ax1, cmap='viridis') + + fwhms = [] + bad_epsf_detected = False + for a, ax in ((0, ax3), (1, ax2)): + # Collapse the PDF along this axis: + profile = epsf.data.sum(axis=a) + itop = profile.argmax() + poffset = profile[itop] / 2 + + # Run a spline through the points, but subtract half of the peak value, and find the roots: + # We have to use a cubic spline, since roots() is not supported for other splines + # for some reason + profile_intp = UnivariateSpline(np.arange(0, len(profile)), + profile - poffset, + k=3, + s=0, + ext=3) + lr = profile_intp.roots() + + # Plot the profile and spline: + x_fine = np.linspace(-0.5, len(profile) - 0.5, 500) + ax.plot(profile, 'k.-') + ax.plot(x_fine, profile_intp(x_fine) + poffset, 'g-') + ax.axvline(itop) + ax.set_xlim(-0.5, len(profile) - 0.5) + + # Do some sanity checks on the ePSF: + # It should pass 50% exactly twice and have the maximum inside that region. + # I.e. 
it should be a single gaussian-like peak + if len(lr) != 2 or itop < lr[0] or itop > lr[1]: + logger.error("Bad PSF along axis %d", a) + bad_epsf_detected = True + else: + axis_fwhm = lr[1] - lr[0] + fwhms.append(axis_fwhm) + ax.axvspan(lr[0], lr[1], facecolor='g', alpha=0.2) + + # Save the ePSF figure: + ax4.axis('off') + fig.savefig(os.path.join(output_folder, 'epsf.png'), bbox_inches='tight') + plt.close(fig) + + # There was a problem with the ePSF: + if bad_epsf_detected: + raise Exception("Bad ePSF detected.") + + # Let's make the final FWHM the largest one we found: + fwhm = np.max(fwhms) + image.fwhm = fwhm + logger.info("Final FWHM based on ePSF: %f", fwhm) + + # ============================================================================================== + # COORDINATES TO DO PHOTOMETRY AT + # ============================================================================================== + + coordinates = np.array([[ref['pixel_column'], ref['pixel_row']] + for ref in references]) + + # Add the main target position as the first entry for doing photometry directly in the + # science image: + coordinates = np.concatenate(([target_pixel_pos], coordinates), axis=0) + + # ============================================================================================== + # APERTURE PHOTOMETRY + # ============================================================================================== + + # Define apertures for aperture photometry: + apertures = CircularAperture(coordinates, r=fwhm) + annuli = CircularAnnulus(coordinates, r_in=1.5 * fwhm, r_out=2.5 * fwhm) + + apphot_tbl = aperture_photometry(image.subclean, [apertures, annuli], + mask=image.mask, + error=image.error) + + logger.info('Aperture Photometry Success') + logger.debug("Aperture Photometry Table:\n%s", apphot_tbl) + + # ============================================================================================== + # PSF PHOTOMETRY + # 
============================================================================================== + + # Create photometry object: + photometry_obj = BasicPSFPhotometry(group_maker=DAOGroup(fwhm), + bkg_estimator=MedianBackground(), + psf_model=epsf, + fitter=fitting.LevMarLSQFitter(), + fitshape=size, + aperture_radius=fwhm) + + psfphot_tbl = photometry_obj(image=image.subclean, + init_guesses=Table(coordinates, + names=['x_0', 'y_0'])) + + logger.info('PSF Photometry Success') + logger.debug("PSF Photometry Table:\n%s", psfphot_tbl) + + # ============================================================================================== + # TEMPLATE SUBTRACTION AND TARGET PHOTOMETRY + # ============================================================================================== + + # Find the pixel-scale of the science image: + pixel_area = proj_plane_pixel_area(image.wcs.celestial) + pixel_scale = np.sqrt(pixel_area) * 3600 # arcsec/pixel + # print(image.wcs.celestial.cunit) % Doesn't work? 
+ logger.info("Science image pixel scale: %f", pixel_scale) + + diffimage = None + if datafile.get('diffimg') is not None: + + diffimg_path = os.path.join(datafile['archive_path'], + datafile['diffimg']['path']) + diffimg = load_image(diffimg_path) + diffimage = diffimg.image + + elif attempt_imagematch and datafile.get('template') is not None: + # Run the template subtraction, and get back + # the science image where the template has been subtracted: + diffimage = run_imagematch(datafile, + target, + star_coord=coordinates, + fwhm=fwhm, + pixel_scale=pixel_scale) + + # We have a diff image, so let's do photometry of the target using this: + if diffimage is not None: + # Include mask from original image: + diffimage = np.ma.masked_array(diffimage, image.mask) + + # Create apertures around the target: + apertures = CircularAperture(target_pixel_pos, r=fwhm) + annuli = CircularAnnulus(target_pixel_pos, + r_in=1.5 * fwhm, + r_out=2.5 * fwhm) + + # Create two plots of the difference image: + fig, ax = plt.subplots(1, 1, squeeze=True, figsize=(20, 20)) + plot_image(diffimage, ax=ax, cbar='right', title=target_name) + ax.plot(target_pixel_pos[0], + target_pixel_pos[1], + marker='+', + color='r') + fig.savefig(os.path.join(output_folder, 'diffimg.png'), + bbox_inches='tight') + apertures.plot(color='r') + annuli.plot(color='k') + ax.set_xlim(target_pixel_pos[0] - 50, target_pixel_pos[0] + 50) + ax.set_ylim(target_pixel_pos[1] - 50, target_pixel_pos[1] + 50) + fig.savefig(os.path.join(output_folder, 'diffimg_zoom.png'), + bbox_inches='tight') + plt.close(fig) + + # Run aperture photometry on subtracted image: + target_apphot_tbl = aperture_photometry(diffimage, [apertures, annuli], + mask=image.mask, + error=image.error) + + # Make target only photometry object if keep_diff_fixed = True + if keep_diff_fixed: + epsf.fixed.update({'x_0': True, 'y_0': True}) + + # @TODO: Try iteraratively subtracted photometry + # Create photometry object: + photometry_obj = 
BasicPSFPhotometry( + group_maker=DAOGroup(0.0001), + bkg_estimator=MedianBackground(), + psf_model=epsf, + fitter=fitting.LevMarLSQFitter(), + fitshape=size, + aperture_radius=fwhm) + + # Run PSF photometry on template subtracted image: + target_psfphot_tbl = photometry_obj(diffimage, + init_guesses=Table( + target_pixel_pos, + names=['x_0', 'y_0'])) + + if keep_diff_fixed: # Need to adjust table columns if x_0 and y_0 were fixed + target_psfphot_tbl['x_0_unc'] = 0.0 + target_psfphot_tbl['y_0_unc'] = 0.0 + + # Combine the output tables from the target and the reference stars into one: + apphot_tbl = vstack([target_apphot_tbl, apphot_tbl], join_type='exact') + psfphot_tbl = vstack([target_psfphot_tbl, psfphot_tbl], + join_type='exact') + + # Build results table: + tab = references.copy() + + row = { + 'starid': 0, + 'ra': target['ra'], + 'decl': target['decl'], + 'pixel_column': target_pixel_pos[0], + 'pixel_row': target_pixel_pos[1] + } + row.update([(k, np.NaN) + for k in set(tab.keys()) - set(row) - {'gaia_variability'}]) + tab.insert_row(0, row) + + if diffimage is not None: + row['starid'] = -1 + tab.insert_row(0, row) + + indx_main_target = tab['starid'] <= 0 + + # Subtract background estimated from annuli: + flux_aperture = apphot_tbl['aperture_sum_0'] - ( + apphot_tbl['aperture_sum_1'] / annuli.area) * apertures.area + flux_aperture_error = np.sqrt(apphot_tbl['aperture_sum_err_0']**2 + (apphot_tbl['aperture_sum_err_1'] / annuli.area * apertures.area)**2) + + # Add table columns with results: + tab['flux_aperture'] = flux_aperture / image.exptime + tab['flux_aperture_error'] = flux_aperture_error / image.exptime + tab['flux_psf'] = psfphot_tbl['flux_fit'] / image.exptime + tab['flux_psf_error'] = psfphot_tbl['flux_unc'] / image.exptime + tab['pixel_column_psf_fit'] = psfphot_tbl['x_fit'] + tab['pixel_row_psf_fit'] = psfphot_tbl['y_fit'] + tab['pixel_column_psf_fit_error'] = psfphot_tbl['x_0_unc'] + tab['pixel_row_psf_fit_error'] = psfphot_tbl['y_0_unc'] + + 
# Check that we got valid photometry: + if np.any(~np.isfinite(tab[indx_main_target]['flux_psf'])) or np.any( + ~np.isfinite(tab[indx_main_target]['flux_psf_error'])): + raise Exception("Target magnitude is undefined.") + + # ============================================================================================== + # CALIBRATE + # ============================================================================================== + + # Convert PSF fluxes to magnitudes: + mag_inst = -2.5 * np.log10(tab['flux_psf']) + mag_inst_err = (2.5 / np.log(10)) * (tab['flux_psf_error'] / tab['flux_psf']) + + # Corresponding magnitudes in catalog: + # TODO: add color terms here + mag_catalog = tab[ref_filter] + + # Mask out things that should not be used in calibration: + use_for_calibration = np.ones_like(mag_catalog, dtype='bool') + use_for_calibration[ + indx_main_target] = False # Do not use target for calibration + use_for_calibration[~np.isfinite(mag_inst) + | ~np.isfinite(mag_catalog)] = False + + # Just creating some short-hands: + x = mag_catalog[use_for_calibration] + y = mag_inst[use_for_calibration] + yerr = mag_inst_err[use_for_calibration] + weights = 1.0 / yerr**2 + + assert any(use_for_calibration), "No calibration stars" + + # Fit linear function with fixed slope, using sigma-clipping: + model = models.Linear1D(slope=1, fixed={'slope': True}) + fitter = fitting.FittingWithOutlierRemoval(fitting.LinearLSQFitter(), + sigma_clip, + sigma=3.0) + best_fit, sigma_clipped = fitter(model, x, y, weights=weights) + + # Extract zero-point and estimate its error using a single weighted fit: + # I don't know why there is not an error-estimate attached directly to the Parameter? 
+ zp = -1 * best_fit.intercept.value # Negative, because that is the way zeropoints are usually defined + + weights[sigma_clipped] = 0 # Trick to make following expression simpler + n_weights = len(weights.nonzero()[0]) + if n_weights > 1: + zp_error = np.sqrt(n_weights * nansum(weights * (y - best_fit(x))**2) / nansum(weights) / (n_weights - 1)) + else: + zp_error = np.NaN + logger.info('Leastsquare ZP = %.3f, ZP_error = %.3f', zp, zp_error) + + # Determine sigma clipping sigma according to Chauvenet method + # But don't allow less than sigma = sigmamin, setting to 1.5 for now. + # Should maybe be 2? + sigmamin = 1.5 + sig_chauv = sigma_from_Chauvenet(len(x)) + sig_chauv = sig_chauv if sig_chauv >= sigmamin else sigmamin + + # Extract zero point and error using bootstrap method + nboot = 1000 + logger.info('Running bootstrap with sigma = %.2f and n = %d', sig_chauv, + nboot) + pars = bootstrap_outlier(x, + y, + yerr, + n=nboot, + model=model, + fitter=fitting.LinearLSQFitter, + outlier=sigma_clip, + outlier_kwargs={'sigma': sig_chauv}, + summary='median', + error='bootstrap', + return_vals=False) + + zp_bs = pars['intercept'] * -1.0 + zp_error_bs = pars['intercept_error'] + + logger.info('Bootstrapped ZP = %.3f, ZP_error = %.3f', zp_bs, zp_error_bs) + + # Check that difference is not large + zp_diff = 0.4 + if np.abs(zp_bs - zp) >= zp_diff: + logger.warning( + "Bootstrap and weighted LSQ ZPs differ by %.2f, \ + which is more than the allowed %.2f mag.", np.abs(zp_bs - zp), zp_diff) + + # Add calibrated magnitudes to the photometry table: + tab['mag'] = mag_inst + zp_bs + tab['mag_error'] = np.sqrt(mag_inst_err**2 + zp_error_bs**2) + + fig, ax = plt.subplots(1, 1) + ax.errorbar(x, y, yerr=yerr, fmt='k.') + ax.scatter(x[sigma_clipped], y[sigma_clipped], marker='x', c='r') + ax.plot(x, best_fit(x), color='g', linewidth=3) + ax.set_xlabel('Catalog magnitude') + ax.set_ylabel('Instrumental magnitude') + fig.savefig(os.path.join(output_folder, 'calibration.png'), + 
bbox_inches='tight') + plt.close(fig) + + # Check that we got valid photometry: + if not np.isfinite(tab[0]['mag']) or not np.isfinite(tab[0]['mag_error']): + raise Exception("Target magnitude is undefined.") + + # ============================================================================================== + # SAVE PHOTOMETRY + # ============================================================================================== + + # rename x, y columns to pixel_colum, pixel_row + #tab.rename_columns(('x', 'y'), ('pixel_column', 'pixel_row')) + + # Descriptions of columns: + tab['flux_aperture'].unit = u.count / u.second + tab['flux_aperture_error'].unit = u.count / u.second + tab['flux_psf'].unit = u.count / u.second + tab['flux_psf_error'].unit = u.count / u.second + tab['pixel_column'].unit = u.pixel + tab['pixel_row'].unit = u.pixel + tab['pixel_column_psf_fit'].unit = u.pixel + tab['pixel_row_psf_fit'].unit = u.pixel + tab['pixel_column_psf_fit_error'].unit = u.pixel + tab['pixel_row_psf_fit_error'].unit = u.pixel + + # Meta-data: + tab.meta['version'] = __version__ + tab.meta['fileid'] = fileid + tab.meta['template'] = None if datafile.get( + 'template') is None else datafile['template']['fileid'] + tab.meta['diffimg'] = None if datafile.get( + 'diffimg') is None else datafile['diffimg']['fileid'] + tab.meta['photfilter'] = photfilter + tab.meta['fwhm'] = fwhm * u.pixel + tab.meta['pixel_scale'] = pixel_scale * u.arcsec / u.pixel + tab.meta['seeing'] = (fwhm * pixel_scale) * u.arcsec + tab.meta['obstime-bmjd'] = float(image.obstime.mjd) + tab.meta['zp'] = zp_bs + tab.meta['zp_error'] = zp_error_bs + tab.meta['zp_diff'] = np.abs(zp_bs - zp) + tab.meta['zp_error_weights'] = zp_error + + # Filepath where to save photometry: + photometry_output = os.path.join(output_folder, 'photometry.ecsv') + + # Write the final table to file: + tab.write(photometry_output, + format='ascii.ecsv', + delimiter=',', + overwrite=True) + + toc = default_timer() + + 
logger.info("------------------------------------------------------") + logger.info("Success!") + logger.info("Main target: %f +/- %f", tab[0]['mag'], tab[0]['mag_error']) + logger.info("Photometry took: %.1f seconds", toc - tic) + + return photometry_output diff --git a/flows/wcs.py b/flows/wcs.py new file mode 100644 index 0000000..6fd1e9a --- /dev/null +++ b/flows/wcs.py @@ -0,0 +1,345 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Clean bad source extraction, find and correct wcs + +.. codeauthor:: Emir Karamehmetoglu +""" + +import numpy as np +import astroalign as aa +import astropy.coordinates as coords +import astropy.wcs as wcs +from astropy.stats import sigma_clip, gaussian_fwhm_to_sigma +from astropy.modeling import models, fitting +from copy import deepcopy +from bottleneck import nanmedian + + +class MinStarError(RuntimeError): + pass + + +def force_reject_g2d(xarray, + yarray, + image, + get_fwhm=True, + rsq_min=0.5, + radius=10, + fwhm_guess=6.0, + fwhm_min=3.5, + fwhm_max=18.0): + '''xarray, yarray, image, get_fwhm=True, rsq_min=0.5, radius=10, fwhm_guess=6.0, fwhm_min=3.5, + fwhm_max=18.0''' + # Set up 2D Gaussian model for fitting to reference stars: + g2d = models.Gaussian2D(amplitude=1.0, + x_mean=radius, + y_mean=radius, + x_stddev=fwhm_guess * gaussian_fwhm_to_sigma) + g2d.amplitude.bounds = (0.1, 2.0) + g2d.x_mean.bounds = (0.5 * radius, 1.5 * radius) + g2d.y_mean.bounds = (0.5 * radius, 1.5 * radius) + g2d.x_stddev.bounds = (fwhm_min * gaussian_fwhm_to_sigma, + fwhm_max * gaussian_fwhm_to_sigma) + g2d.y_stddev.tied = lambda model: model.x_stddev + g2d.theta.fixed = True + + gfitter = fitting.LevMarLSQFitter() + + # Stars reject + N = len(xarray) + fwhms = np.full((N, 2), np.NaN) + xys = np.full((N, 2), np.NaN) + rsqs = np.full(N, np.NaN) + for i, (x, y) in enumerate(zip(xarray, yarray)): + x = int(np.round(x)) + y = int(np.round(y)) + xmin = max(x - radius, 0) + xmax = min(x + radius + 1, image.shape[1]) + ymin = max(y - radius, 0) 
+ ymax = min(y + radius + 1, image.shape[0]) + + curr_star = deepcopy(image.subclean[ymin:ymax, xmin:xmax]) + + edge = np.zeros_like(curr_star, dtype='bool') + edge[(0, -1), :] = True + edge[:, (0, -1)] = True + curr_star -= nanmedian(curr_star[edge]) + curr_star /= np.max(curr_star) + + ypos, xpos = np.mgrid[:curr_star.shape[0], :curr_star.shape[1]] + gfit = gfitter(g2d, x=xpos, y=ypos, z=curr_star) + + # Center + xys[i] = np.array([gfit.x_mean + x - radius, gfit.y_mean + y - radius], + dtype=np.float64) + # Calculate rsq + sstot = ((curr_star - curr_star.mean())**2).sum() + sserr = (gfitter.fit_info['fvec']**2).sum() + rsqs[i] = 1. - (sserr / sstot) + # FWHM + fwhms[i] = gfit.x_fwhm + + masked_xys = np.ma.masked_array(xys, ~np.isfinite(xys)) + masked_rsqs = np.ma.masked_array(rsqs, ~np.isfinite(rsqs)) + mask = (masked_rsqs >= rsq_min) & (masked_rsqs < 1.0 + ) # Reject Rsq < rsq_min + # changed + #masked_xys = masked_xys[mask] # Clean extracted array. + # to + masked_xys.mask[~mask] = True + # don't know if it breaks anything, but it doesn't make sence if + # len(masked_xys) != len(masked_rsqs) FIXME + masked_fwhms = np.ma.masked_array(fwhms, ~np.isfinite(fwhms)) + + if get_fwhm: return masked_fwhms, masked_xys, mask, masked_rsqs + return masked_xys, mask, masked_rsqs + + +def clean_with_rsq_and_get_fwhm(masked_fwhms, + masked_rsqs, + references, + min_fwhm_references=2, + min_references=6, + rsq_min=0.15): + """ + Clean references and obtain fwhm using RSQ values. 
+ Args: + masked_fwhms (np.ma.maskedarray): array of fwhms + masked_rsqs (np.ma.maskedarray): array of rsq values + references (astropy.table.Table): table or reference stars + min_fwhm_references: (Default 2) min stars to get a fwhm + min_references: (Default 6) min stars to aim for when cutting by R2 + rsq_min: (Default 0.15) min rsq value + """ + min_references_now = min_references + rsqvals = np.arange(rsq_min, 0.95, 0.15)[::-1] + fwhm_found = False + min_references_achieved = False + + # Clean based on R^2 Value + while not min_references_achieved: + for rsqval in rsqvals: + mask = (masked_rsqs >= rsqval) & (masked_rsqs < 1.0) + nreferences = np.sum(np.isfinite(masked_fwhms[mask])) + if nreferences >= min_fwhm_references: + _fwhms_cut_ = np.nanmean( + sigma_clip(masked_fwhms[mask], maxiters=100, sigma=2.0)) + #logger.info('R^2 >= ' + str(rsqval) + ': ' + str( + # np.sum(np.isfinite(masked_fwhms[mask]))) + ' stars w/ mean FWHM = ' + str(np.round(_fwhms_cut_, 1))) + if not fwhm_found: + fwhm = _fwhms_cut_ + fwhm_found = True + if nreferences >= min_references_now: + references = references[mask] + min_references_achieved = True + break + if min_references_achieved: break + min_references_now = min_references_now - 2 + if (min_references_now < 2) and fwhm_found: + break + elif not fwhm_found: + raise Exception("Could not estimate FWHM") + #logger.debug('{} {} {}'.format(min_references_now, min_fwhm_references, nreferences)) + + #logger.info("FWHM: %f", fwhm) + if np.isnan(fwhm): + raise Exception("Could not estimate FWHM") + + # if minimum references not found, then take what we can get with even a weaker cut. + # TODO: Is this right, or should we grab rsq_min (or even weaker?) 
+ min_references_now = min_references - 2 + while not min_references_achieved: + mask = (masked_rsqs >= rsq_min) & (masked_rsqs < 1.0) + nreferences = np.sum(np.isfinite(masked_fwhms[mask])) + if nreferences >= min_references_now: + references = references[mask] + min_references_achieved = True + rsq_min = rsq_min - 0.07 + min_references_now = min_references_now - 1 + + # Check len of references as this is a destructive cleaning. + # if len(references) == 2: logger.info('2 reference stars remaining, check WCS and image quality') + if len(references) < 2: + raise Exception("{} References remaining; could not clean.".format( + len(references))) + return fwhm, references + + +def mkposxy(posx, posy): + '''Make 2D np array for astroalign''' + img_posxy = np.array([[x, y] for x, y in zip(posx, posy)], dtype="float64") + return img_posxy + + +def try_transform(source, target, pixeltol=2, nnearest=5, max_stars=50): + aa.NUM_NEAREST_NEIGHBORS = nnearest + aa.PIXEL_TOL = pixeltol + transform, (sourcestars, + targetstars) = aa.find_transform(source, + target, + max_control_points=max_stars) + return sourcestars, targetstars + + +def try_astroalign(source, target, pixeltol=2, nnearest=5, max_stars_n=50): + # Get indexes of matched stars + success = False + try: + source_stars, target_stars = try_transform(source, + target, + pixeltol=pixeltol, + nnearest=nnearest, + max_stars=max_stars_n) + source_ind = np.argwhere(np.in1d(source, source_stars)[::2]).flatten() + target_ind = np.argwhere(np.in1d(target, target_stars)[::2]).flatten() + success = True + except aa.MaxIterError: + source_ind, target_ind = 'None', 'None' + return source_ind, target_ind, success + + +def min_to_max_astroalign(source, + target, + fwhm=5, + fwhm_min=1, + fwhm_max=4, + knn_min=5, + knn_max=20, + max_stars=100, + min_matches=3): + '''Try to find matches using astroalign asterisms by stepping through some parameters.''' + # Set max_control_points par based on number of stars and max_stars. 
+    nstars = max(len(source), len(target))
+    if max_stars >= nstars: max_stars_list = 'None'
+    else:
+        if max_stars > 60: max_stars_list = (max_stars, 50, 4, 3)
+        else: max_stars_list = (max_stars, 6, 4, 3)
+
+    # Create max_stars step-through list if not given
+    if max_stars_list == 'None':
+        if nstars > 6:
+            max_stars_list = (nstars, 5, 3)
+        elif nstars > 3:
+            max_stars_list = (nstars, 3)
+
+    pixeltols = np.linspace(int(fwhm * fwhm_min),
+                            int(fwhm * fwhm_max),
+                            4,
+                            dtype=int)
+    nearest_neighbors = np.linspace(knn_min,
+                                    min(knn_max, nstars),
+                                    4,
+                                    dtype=int)
+
+    for max_stars_n in max_stars_list:
+        for pixeltol in pixeltols:
+            for nnearest in nearest_neighbors:
+                source_ind, target_ind, success = try_astroalign(
+                    source,
+                    target,
+                    pixeltol=pixeltol,
+                    nnearest=nnearest,
+                    max_stars_n=max_stars_n)
+                if success:
+                    if len(source_ind) >= min_matches:
+                        return source_ind, target_ind, success
+                    else:
+                        success = False
+    return 'None', 'None', success
+
+
+def kdtree(source, target, fwhm=5, fwhm_max=4, min_matches=3):
+    '''Use KDTree to get nearest neighbor matches within fwhm_max*fwhm distance'''
+
+    # Use KDTree to rapidly efficiently query nearest neighbors
+    from scipy.spatial import KDTree
+    tt = KDTree(target)
+    st = KDTree(source)
+    matches_list = st.query_ball_tree(tt, r=fwhm * fwhm_max)
+
+    #indx = []
+    targets = []
+    sources = []
+    for j, (sstar, match) in enumerate(zip(source, matches_list)):
+        if np.array(target[match]).size != 0:
+            targets.append(match[0])
+            sources.append(j)
+    sources = np.array(sources, dtype=int)
+    targets = np.array(targets, dtype=int)
+    # Return indexes of matches
+    return sources, targets, len(sources) >= min_matches
+
+
+def get_new_wcs(extracted_ind,
+                extracted_stars,
+                clean_references,
+                ref_ind,
+                obstime,
+                rakey='ra_obs',
+                deckey='decl_obs'):
+    targets = (extracted_stars[extracted_ind][:, 0],
+               extracted_stars[extracted_ind][:, 1])
+    c = coords.SkyCoord(clean_references[rakey][ref_ind],
+
clean_references[deckey][ref_ind], + obstime=obstime) + return wcs.utils.fit_wcs_from_points(targets, c) + + +def get_clean_references(references, + masked_rsqs, + min_references_ideal=6, + min_references_abs=3, + rsq_min=0.15, + rsq_ideal=0.5, + keep_max=100, + rescue_bad: bool = True): + + # Greedy first try + mask = (masked_rsqs >= rsq_ideal) & (masked_rsqs < 1.0) + if np.sum(np.isfinite(masked_rsqs[mask])) >= min_references_ideal: + if len(references[mask]) <= keep_max: + return references[mask] + elif len(references[mask]) >= keep_max: + import pandas as pd # @TODO: Convert to pure numpy implementation + df = pd.DataFrame(masked_rsqs, columns=['rsq']) + masked_rsqs.mask = ~mask + nmasked_rsqs = df.sort_values('rsq', + ascending=False).dropna().index._data + return references[nmasked_rsqs[:keep_max]] + + # Desperate second try + mask = (masked_rsqs >= rsq_min) & (masked_rsqs < 1.0) + masked_rsqs.mask = ~mask + + # Switching to pandas for easier selection + import pandas as pd # @TODO: Convert to pure numpy implementation + df = pd.DataFrame(masked_rsqs, columns=['rsq']) + nmasked_rsqs = deepcopy( + df.sort_values('rsq', ascending=False).dropna().index._data) + nmasked_rsqs = nmasked_rsqs[:min(min_references_ideal, len(nmasked_rsqs))] + if len(nmasked_rsqs) >= min_references_abs: + return references[nmasked_rsqs] + if not rescue_bad: + raise MinStarError( + 'Less than {} clean stars and rescue_bad = False'.format( + min_references_abs)) + + # Extremely desperate last ditch attempt i.e. 
"rescue bad" + elif rescue_bad: + mask = (masked_rsqs >= 0.02) & (masked_rsqs < 1.0) + masked_rsqs.mask = ~mask + + # Switch to pandas + df = pd.DataFrame(masked_rsqs, columns=['rsq']) + nmasked_rsqs = df.sort_values('rsq', + ascending=False).dropna().index._data + nmasked_rsqs = nmasked_rsqs[:min(min_references_ideal, len(nmasked_rsqs + ))] + if len(nmasked_rsqs) < 2: + raise MinStarError('Less than 2 clean stars.') + return references[nmasked_rsqs] # Return if len >= 2 + # Checks whether sensible input arrays and parameters were provided + raise ValueError( + 'input parameters were wrong, you should not reach here. Check that rescue_bad is True or False.' + ) diff --git a/requirements.txt b/requirements.txt index db9c492..f9f51d3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,9 +8,10 @@ Bottleneck == 1.3.2 matplotlib == 3.3.1 mplcursors == 0.3 seaborn +pandas requests -astropy < 4.0 -photutils < 0.7 +astropy == 4.1 +photutils >= 1.0.2 PyYAML psycopg2-binary jplephem @@ -20,3 +21,6 @@ tqdm pytz beautifulsoup4 git+https://github.com/obscode/imagematch.git@photutils#egg=imagematch +sep +astroalign > 2.3 +networkx diff --git a/run_photometry.py b/run_photometry.py index d2daf93..3beed68 100644 --- a/run_photometry.py +++ b/run_photometry.py @@ -13,137 +13,152 @@ import multiprocessing from flows import api, photometry, load_config -#-------------------------------------------------------------------------------------------------- -def process_fileid(fid, output_folder_root=None, attempt_imagematch=True, autoupload=False): - - logger = logging.getLogger('flows') - logging.captureWarnings(True) - logger_warn = logging.getLogger('py.warnings') - formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - - datafile = api.get_datafile(fid) - target_name = datafile['target_name'] - - # Folder to save output: - output_folder = os.path.join(output_folder_root, target_name, '%05d' % fid) - - photfile = None - _filehandler = None - try: - # Set 
the status to indicate that we have started processing: - api.set_photometry_status(fid, 'running') - - # Create the output directory if it doesn't exist: - os.makedirs(output_folder, exist_ok=True) - - # Also write any logging output to the - _filehandler = logging.FileHandler(os.path.join(output_folder, 'photometry.log'), mode='w') - _filehandler.setFormatter(formatter) - _filehandler.setLevel(logging.INFO) - logger.addHandler(_filehandler) - logger_warn.addHandler(_filehandler) - - photfile = photometry(fileid=fid, - output_folder=output_folder, - attempt_imagematch=attempt_imagematch) - - except (SystemExit, KeyboardInterrupt): - logger.error("Aborted by user or system.") - if os.path.exists(output_folder): - shutil.rmtree(output_folder, ignore_errors=True) - photfile = None - api.set_photometry_status(fid, 'abort') - - except: # noqa: E722, pragma: no cover - logger.exception("Photometry failed") - photfile = None - api.set_photometry_status(fid, 'error') - - if _filehandler is not None: - logger.removeHandler(_filehandler) - logger_warn.removeHandler(_filehandler) - - if photfile is not None: - if autoupload: - api.upload_photometry(fid, delete_completed=True) - api.set_photometry_status(fid, 'done') - - return photfile - -#-------------------------------------------------------------------------------------------------- + +# -------------------------------------------------------------------------------------------------- +def process_fileid(fid, output_folder_root=None, attempt_imagematch=True, autoupload=False, keep_diff_fixed=False, + timeoutpar='None'): + logger = logging.getLogger('flows') + logging.captureWarnings(True) + logger_warn = logging.getLogger('py.warnings') + formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S") + + datafile = api.get_datafile(fid) + target_name = datafile['target_name'] + + # Folder to save output: + output_folder = os.path.join(output_folder_root, target_name, '%05d' % fid) + + 
photfile = None + _filehandler = None + try: + # Set the status to indicate that we have started processing: + api.set_photometry_status(fid, 'running') + + # Create the output directory if it doesn't exist: + os.makedirs(output_folder, exist_ok=True) + + # Also write any logging output to the + _filehandler = logging.FileHandler(os.path.join(output_folder, 'photometry.log'), mode='w') + _filehandler.setFormatter(formatter) + _filehandler.setLevel(logging.INFO) + logger.addHandler(_filehandler) + logger_warn.addHandler(_filehandler) + + photfile = photometry(fileid=fid, + output_folder=output_folder, + attempt_imagematch=attempt_imagematch, + keep_diff_fixed=keep_diff_fixed, + timeoutpar=timeoutpar) + + except (SystemExit, KeyboardInterrupt): + logger.error("Aborted by user or system.") + if os.path.exists(output_folder): + shutil.rmtree(output_folder, ignore_errors=True) + photfile = None + api.set_photometry_status(fid, 'abort') + + except: # noqa: E722, pragma: no cover + logger.exception("Photometry failed") + photfile = None + api.set_photometry_status(fid, 'error') + + if _filehandler is not None: + logger.removeHandler(_filehandler) + logger_warn.removeHandler(_filehandler) + + if photfile is not None: + if autoupload: + api.upload_photometry(fid, delete_completed=True) + api.set_photometry_status(fid, 'done') + + return photfile + + +# -------------------------------------------------------------------------------------------------- if __name__ == '__main__': - # Parse command line arguments: - parser = argparse.ArgumentParser(description='Run photometry pipeline.') - parser.add_argument('-d', '--debug', help='Print debug messages.', action='store_true') - parser.add_argument('-q', '--quiet', help='Only report warnings and errors.', action='store_true') - parser.add_argument('-o', '--overwrite', help='Overwrite existing results.', action='store_true') - - group = parser.add_argument_group('Selecting which files to process') - group.add_argument('--fileid', 
help="Process this file ID. Overrides all other filters.", type=int, default=None) - group.add_argument('--targetid', help="Only process files from this target.", type=int, default=None) - group.add_argument('--filter', type=str, default=None, choices=['missing','all','error']) - - group = parser.add_argument_group('Processing details') - group.add_argument('--threads', type=int, default=1, help="Number of parallel threads to use.") - group.add_argument('--no-imagematch', help="Disable ImageMatch.", action='store_true') - group.add_argument('--autoupload', help="Automatically upload completed photometry to Flows website. Only do this, if you know what you are doing!", action='store_true') - args = parser.parse_args() - - # Ensure that all input has been given: - if not args.fileid and not args.targetid and args.filter is None: - parser.error("Please select either a specific FILEID .") - - # Set logging level: - logging_level = logging.INFO - if args.quiet: - logging_level = logging.WARNING - elif args.debug: - logging_level = logging.DEBUG - - # Number of threads to use: - threads = args.threads - if threads <= 0: - threads = multiprocessing.cpu_count() - - # Setup logging: - formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') - console = logging.StreamHandler() - console.setFormatter(formatter) - logger = logging.getLogger('flows') - if not logger.hasHandlers(): - logger.addHandler(console) - logger.setLevel(logging_level) - - if args.fileid is not None: - # Run the specified fileid: - fileids = [args.fileid] - else: - # Ask the API for a list of fileids which are yet to be processed: - fileids = api.get_datafiles(targetid=args.targetid, filt=args.filter) - - config = load_config() - output_folder_root = config.get('photometry', 'output', fallback='.') - - # Create function wrapper: - process_fileid_wrapper = functools.partial(process_fileid, - output_folder_root=output_folder_root, - attempt_imagematch=not args.no_imagematch, - 
autoupload=args.autoupload) - - if threads > 1: - # Disable printing info messages from the parent function. - # It is going to be all jumbled up anyway. - #logger.setLevel(logging.WARNING) - - # There is more than one area to process, so let's start - # a process pool and process them in parallel: - with multiprocessing.Pool(threads) as pool: - pool.map(process_fileid_wrapper, fileids) - - else: - # Only single thread so simply run it directly: - for fid in fileids: - print("="*72) - print(fid) - print("="*72) - process_fileid_wrapper(fid) + # Parse command line arguments: + parser = argparse.ArgumentParser(description='Run photometry pipeline.') + parser.add_argument('-d', '--debug', help='Print debug messages.', action='store_true') + parser.add_argument('-q', '--quiet', help='Only report warnings and errors.', action='store_true') + parser.add_argument('-o', '--overwrite', help='Overwrite existing results.', action='store_true') + + group = parser.add_argument_group('Selecting which files to process') + group.add_argument('--fileid', help="Process this file ID. Overrides all other filters.", type=int, default=None) + group.add_argument('--targetid', help="Only process files from this target.", type=int, default=None) + group.add_argument('--filter', type=str, default=None, choices=['missing', 'all', 'error']) + + group = parser.add_argument_group('Processing details') + group.add_argument('--threads', type=int, default=1, help="Number of parallel threads to use.") + group.add_argument('--no-imagematch', help="Disable ImageMatch.", action='store_true') + group.add_argument('--autoupload', + help="Automatically upload completed photometry to Flows website. \ + Only do this, if you know what you are doing!", + action='store_true') + group.add_argument('--fixposdiff', + help="Fix SN position during PSF photometry of difference image. 
\ + Useful when difference image is noisy.", + action='store_true') + group.add_argument('--timeoutpar', type=int, default=None, help="Timeout in Seconds for WCS") + args = parser.parse_args() + + # Ensure that all input has been given: + if not args.fileid and not args.targetid and args.filter is None: + parser.error("Please select either a specific FILEID .") + + # Set logging level: + logging_level = logging.INFO + if args.quiet: + logging_level = logging.WARNING + elif args.debug: + logging_level = logging.DEBUG + + # Number of threads to use: + threads = args.threads + if threads <= 0: + threads = multiprocessing.cpu_count() + + # Setup logging: + formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s', "%Y-%m-%d %H:%M:%S") + console = logging.StreamHandler() + console.setFormatter(formatter) + logger = logging.getLogger('flows') + if not logger.hasHandlers(): + logger.addHandler(console) + logger.propagate = False + logger.setLevel(logging_level) + + if args.fileid is not None: + # Run the specified fileid: + fileids = [args.fileid] + else: + # Ask the API for a list of fileids which are yet to be processed: + fileids = api.get_datafiles(targetid=args.targetid, filt=args.filter) + + config = load_config() + output_folder_root = config.get('photometry', 'output', fallback='.') + + # Create function wrapper: + process_fileid_wrapper = functools.partial(process_fileid, + output_folder_root=output_folder_root, + attempt_imagematch=not args.no_imagematch, + autoupload=args.autoupload, + keep_diff_fixed=args.fixposdiff, + timeoutpar=args.timeoutpar) + + if threads > 1: + # Disable printing info messages from the parent function. + # It is going to be all jumbled up anyway. 
+ # logger.setLevel(logging.WARNING) + + # There is more than one area to process, so let's start + # a process pool and process them in parallel: + with multiprocessing.Pool(threads) as pool: + pool.map(process_fileid_wrapper, fileids) + + else: + # Only single thread so simply run it directly: + for fid in fileids: + print("=" * 72) + print(fid) + print("=" * 72) + process_fileid_wrapper(fid) diff --git a/run_plotlc.py b/run_plotlc.py index d64072b..2490332 100644 --- a/run_plotlc.py +++ b/run_plotlc.py @@ -28,10 +28,11 @@ def main(): Can be either the SN name (e.g. 2019yvr) or the Flows target ID.""") parser.add_argument('--fileid', '-i', type=int, nargs='*', default=None, help='Specific file ids within target separated by spaces: -i ') - parser.add_argument('--filters', '-f', type=str, nargs='*', default=None, choices=all_filters, - help='List of space delimited filters. If not provided will use all') + parser.add_argument('--filters', '-f', type=str, nargs='*', default=None, + help='List of space delimited filters. If not provided will use all. 
Choose between {}'.format(all_filters)) parser.add_argument('--offset', '-jd', type=float, default=2458800.0) parser.add_argument('--subonly', help='Only show template subtracted data points.', action='store_true') + parser.add_argument('--hidpi', help='double DPI for 4k resolution', action='store_true') args = parser.parse_args() # To use when only plotting some filters @@ -110,7 +111,8 @@ def main(): # Create the plot: plots_interactive() sns.set(style='ticks') - fig, ax = plt.subplots(figsize=(6.4,4), dpi=130) + dpi_mult = 1 if not args.hidpi else 2 + fig, ax = plt.subplots(figsize=(6.4,4), dpi=130*dpi_mult) fig.subplots_adjust(top=0.95, left=0.1, bottom=0.1, right=0.97) cps = sns.color_palette() diff --git a/tests/test_photometry.py b/tests/test_photometry.py index 38bc9a1..15b379f 100644 --- a/tests/test_photometry.py +++ b/tests/test_photometry.py @@ -8,7 +8,7 @@ import pytest import conftest # noqa: F401 -from flows import photometry +from flows import photometry # noqa: F401 #-------------------------------------------------------------------------------------------------- def test_import_photometry():