From 3657df54c5dbc781643ae650bcc84a7d4b246a96 Mon Sep 17 00:00:00 2001 From: Ramiro Torres-Escobedo Date: Thu, 2 Feb 2023 16:56:12 +0800 Subject: [PATCH 1/9] formatting code --- hawc_hal/HAL.py | 143 ++++++++--- .../convenience_functions/fit_point_source.py | 33 ++- hawc_hal/convolved_source/__init__.py | 5 +- .../convolved_extended_source.py | 120 ++++++--- .../convolved_point_source.py | 91 ++++--- .../convolved_source_container.py | 2 + hawc_hal/flat_sky_projection.py | 31 ++- hawc_hal/healpix_handling/__init__.py | 2 +- .../healpix_handling/flat_sky_to_healpix.py | 40 +-- .../healpix_handling/gnomonic_projection.py | 63 ++--- hawc_hal/healpix_handling/healpix_utils.py | 2 +- hawc_hal/healpix_handling/sparse_healpix.py | 9 +- hawc_hal/interpolation/__init__.py | 2 +- .../fast_bilinar_interpolation.py | 31 ++- hawc_hal/interpolation/irregular_grid.py | 7 +- .../interpolation/log_log_interpolation.py | 5 +- hawc_hal/log_likelihood.py | 16 +- hawc_hal/maptree/data_analysis_bin.py | 37 ++- hawc_hal/maptree/map_tree.py | 18 +- hawc_hal/obsolete/fast_convolution.py | 34 +-- hawc_hal/obsolete/image_to_healpix.py | 44 +++- hawc_hal/obsolete/tf1_wrapper.py | 5 +- hawc_hal/obsolete/ts_map.py | 235 ++++++++++-------- hawc_hal/psf_fast/__init__.py | 2 +- hawc_hal/psf_fast/psf_convolutor.py | 32 ++- hawc_hal/psf_fast/psf_interpolator.py | 48 ++-- hawc_hal/psf_fast/psf_wrapper.py | 12 +- hawc_hal/region_of_interest/__init__.py | 2 +- .../region_of_interest/healpix_cone_roi.py | 48 ++-- .../region_of_interest/healpix_map_roi.py | 107 +++++--- .../region_of_interest/healpix_roi_base.py | 21 +- hawc_hal/response/__init__.py | 2 +- hawc_hal/response/response_bin.py | 23 +- hawc_hal/serialize.py | 9 +- hawc_hal/sphere_dist.py | 2 +- hawc_hal/tests/conftest.py | 12 +- hawc_hal/tests/test_bilinar_interpolation.py | 8 +- hawc_hal/tests/test_complete_analysis.py | 48 ++-- hawc_hal/tests/test_copy.py | 11 +- hawc_hal/tests/test_geminga_paper.py | 185 +++++++++----- hawc_hal/tests/test_healpixRoi.py | 161 ++++++------ hawc_hal/tests/test_maptree.py | 17 +- hawc_hal/tests/test_model_residual_maps.py | 44 ++-- hawc_hal/tests/test_n_transits.py | 4 +- hawc_hal/tests/test_on_point_source.py | 61 +++-- hawc_hal/tests/test_psf.py | 1 + hawc_hal/tests/test_roi.py | 104 ++++---- hawc_hal/tests/test_root_to_hdf.py | 22 +- hawc_hal/tests/test_serialization.py | 28 +-- hawc_hal/util.py | 8 +- 50 files changed, 1243 insertions(+), 754 deletions(-) diff --git a/hawc_hal/HAL.py b/hawc_hal/HAL.py index 22ed34f..92bc520 100644 --- a/hawc_hal/HAL.py +++ b/hawc_hal/HAL.py @@ -82,7 +82,9 @@ def __init__( # Set up the flat-sky projection self.flat_sky_pixels_size = flat_sky_pixels_size - self._flat_sky_projection = self._roi.get_flat_sky_projection(self.flat_sky_pixels_size) + self._flat_sky_projection = self._roi.get_flat_sky_projection( + self.flat_sky_pixels_size + ) # Read map tree (data) self._maptree = map_tree_factory(maptree, roi=self._roi, n_transits=n_transits) @@ -206,7 +208,9 @@ def psf_integration_method(self, mode): def _setup_psf_convolutors(self): - central_response_bins = self._response.get_response_dec_bin(self._roi.ra_dec_center[1]) + central_response_bins = self._response.get_response_dec_bin( + self._roi.ra_dec_center[1] + ) self._psf_convolutors = collections.OrderedDict() for bin_id in central_response_bins: @@ -279,7 +283,9 @@ def set_active_measurements(self, bin_id_min=None, bin_id_max=None, bin_list=Non for this_bin in range(bin_id_min, bin_id_max + 1): this_bin = str(this_bin) if this_bin not in 
self._all_planes: - raise ValueError(f"Bin {this_bin} is not contained in this maptree.") + raise ValueError( + f"Bin {this_bin} is not contained in this maptree." + ) self._active_planes.append(this_bin) @@ -297,7 +303,9 @@ def set_active_measurements(self, bin_id_min=None, bin_id_max=None, bin_list=Non # if not this_bin in self._all_planes: if this_bin not in self._all_planes: - raise ValueError(f"Bin {this_bin} is not contained in this maptree.") + raise ValueError( + f"Bin {this_bin} is not contained in this maptree." + ) self._active_planes.append(this_bin) @@ -452,7 +460,9 @@ def get_excess_background(self, ra: float, dec: float, radius: float): # each radial bin data = data_analysis_bin.observation_map.as_partial() bkg = data_analysis_bin.background_map.as_partial() - mdl = self._get_model_map(energy_id, n_point_sources, n_ext_sources).as_partial() + mdl = self._get_model_map( + energy_id, n_point_sources, n_ext_sources + ).as_partial() # select counts only from the pixels within specifid distance from # origin of radial profile @@ -519,7 +529,10 @@ def get_radial_profile( # The area of each ring is then given by the difference between two # subsequent circe areas. area = np.array( - [self.get_excess_background(ra, dec, r + offset * delta_r)[0] for r in radii] + [ + self.get_excess_background(ra, dec, r + offset * delta_r)[0] + for r in radii + ] ) temp = area[1:] - area[:-1] @@ -527,7 +540,10 @@ def get_radial_profile( # signals signal = np.array( - [self.get_excess_background(ra, dec, r + offset * delta_r)[1] for r in radii] + [ + self.get_excess_background(ra, dec, r + offset * delta_r)[1] + for r in radii + ] ) temp = signal[1:] - signal[:-1] @@ -535,7 +551,10 @@ def get_radial_profile( # backgrounds bkg = np.array( - [self.get_excess_background(ra, dec, r + offset * delta_r)[2] for r in radii] + [ + self.get_excess_background(ra, dec, r + offset * delta_r)[2] + for r in radii + ] ) temp = bkg[1:] - bkg[:-1] @@ -546,7 +565,10 @@ def get_radial_profile( # model # convert 'top hat' excess into 'ring' excesses. model = np.array( - [self.get_excess_background(ra, dec, r + offset * delta_r)[3] for r in radii] + [ + self.get_excess_background(ra, dec, r + offset * delta_r)[3] + for r in radii + ] ) temp = model[1:] - model[:-1] @@ -557,7 +579,10 @@ def get_radial_profile( self.set_model(model_to_subtract) model_subtract = np.array( - [self.get_excess_background(ra, dec, r + offset * delta_r)[3] for r in radii] + [ + self.get_excess_background(ra, dec, r + offset * delta_r)[3] + for r in radii + ] ) temp = model_subtract[1:] - model_subtract[:-1] @@ -577,11 +602,17 @@ def get_radial_profile( # them to the data later. Weight is normalized (sum of weights over # the bins = 1). 
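# A minimal sketch of the differencing and weighting done in
# get_radial_profile() above: cumulative "top hat" quantities (everything
# enclosed within radius r) become per-ring values by successive differences,
# and each analysis bin is weighted by its expected signal-to-background.
# Not part of the diff itself; all names below are hypothetical stand-ins.
import numpy as np

radii = np.array([0.5, 1.0, 1.5, 2.0])  # outer radii of the rings (deg)
enclosed = np.pi * radii**2  # e.g. area enclosed within each radius

rings = enclosed.copy()
rings[1:] = enclosed[1:] - enclosed[:-1]  # ring k = circle k minus circle k-1

total_model = np.array([30.0, 12.0])  # expected excess per analysis bin
total_bkg = np.array([100.0, 20.0])  # background counts per analysis bin
w = total_model / total_bkg  # weight ~ model / background, as above
w = w / w.sum()  # normalized so the weights sum to 1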
- total_excess = np.array(self.get_excess_background(ra, dec, max_radius)[1])[good_planes] + total_excess = np.array(self.get_excess_background(ra, dec, max_radius)[1])[ + good_planes + ] - total_bkg = np.array(self.get_excess_background(ra, dec, max_radius)[2])[good_planes] + total_bkg = np.array(self.get_excess_background(ra, dec, max_radius)[2])[ + good_planes + ] - total_model = np.array(self.get_excess_background(ra, dec, max_radius)[3])[good_planes] + total_model = np.array(self.get_excess_background(ra, dec, max_radius)[3])[ + good_planes + ] w = np.divide(total_model, total_bkg) weight = np.array([w / np.sum(w) for _ in radii]) @@ -635,7 +666,13 @@ def plot_radial_profile( values for easy retrieval """ - (radii, excess_model, excess_data, excess_error, plane_ids,) = self.get_radial_profile( + ( + radii, + excess_model, + excess_data, + excess_error, + plane_ids, + ) = self.get_radial_profile( ra, dec, active_planes, @@ -667,7 +704,9 @@ def plot_radial_profile( plt.plot(radii, excess_model, color="red", label="Model") - plt.legend(bbox_to_anchor=(1.0, 1.0), loc="upper right", numpoints=1, fontsize=16) + plt.legend( + bbox_to_anchor=(1.0, 1.0), loc="upper right", numpoints=1, fontsize=16 + ) plt.axhline(0, color="deepskyblue", linestyle="--") x_limits = [0, max_radius] @@ -777,7 +816,9 @@ def display_spectrum(self): yerr = [yerr_high, yerr_low] - return self._plot_spectrum(net_counts, yerr, model_only, residuals, residuals_err) + return self._plot_spectrum( + net_counts, yerr, model_only, residuals, residuals_err + ) def _plot_spectrum(self, net_counts, yerr, model_only, residuals, residuals_err): @@ -851,7 +892,9 @@ def get_log_like(self, individual_bins=False, return_null=False): bkg_renorm = list(self._nuisance_parameters.values())[0].value obs = data_analysis_bin.observation_map.as_partial() # type: np.array - bkg = data_analysis_bin.background_map.as_partial() * bkg_renorm # type: np.array + bkg = ( + data_analysis_bin.background_map.as_partial() * bkg_renorm + ) # type: np.array this_pseudo_log_like = log_likelihood(obs, bkg, this_model_map_hpx) @@ -944,7 +987,9 @@ def get_simulated_dataset(self, name): # Active plane. 
Generate new data expectation = self._clone[1][bin_id] - new_data = np.random.poisson(expectation, size=(1, expectation.shape[0])).flatten() + new_data = np.random.poisson( + expectation, size=(1, expectation.shape[0]) + ).flatten() # Substitute data data_analysis_bin.observation_map.set_new_values(new_data) @@ -954,16 +999,18 @@ def get_simulated_dataset(self, name): # Adjust the name of the nuisance parameter old_name = list(self._clone[0]._nuisance_parameters.keys())[0] new_name = old_name.replace(self.name, name) - self._clone[0]._nuisance_parameters[new_name] = self._clone[0]._nuisance_parameters.pop( - old_name - ) + self._clone[0]._nuisance_parameters[new_name] = self._clone[ + 0 + ]._nuisance_parameters.pop(old_name) # Recompute biases self._clone[0]._compute_likelihood_biases() return self._clone[0] - def _get_expectation(self, data_analysis_bin, energy_bin_id, n_point_sources, n_ext_sources): + def _get_expectation( + self, data_analysis_bin, energy_bin_id, n_point_sources, n_ext_sources + ): # Compute the expectation from the model @@ -979,7 +1026,9 @@ def _get_expectation(self, data_analysis_bin, energy_bin_id, n_point_sources, n_ psf_integration_method=self._psf_integration_method, ) - expectation_from_this_source = expectation_per_transit * data_analysis_bin.n_transits + expectation_from_this_source = ( + expectation_per_transit * data_analysis_bin.n_transits + ) if this_model_map is None: @@ -1018,14 +1067,18 @@ def _get_expectation(self, data_analysis_bin, energy_bin_id, n_point_sources, n_ # Only extended sources this_model_map = ( - self._psf_convolutors[energy_bin_id].extended_source_image(this_ext_model_map) + self._psf_convolutors[energy_bin_id].extended_source_image( + this_ext_model_map + ) * data_analysis_bin.n_transits ) else: this_model_map += ( - self._psf_convolutors[energy_bin_id].extended_source_image(this_ext_model_map) + self._psf_convolutors[energy_bin_id].extended_source_image( + this_ext_model_map + ) * data_analysis_bin.n_transits ) @@ -1035,14 +1088,18 @@ def _get_expectation(self, data_analysis_bin, energy_bin_id, n_point_sources, n_ # First divide for the pixel area because we need to interpolate brightness # this_model_map = old_div(this_model_map, self._flat_sky_projection.project_plane_pixel_area) - this_model_map = this_model_map / self._flat_sky_projection.project_plane_pixel_area + this_model_map = ( + this_model_map / self._flat_sky_projection.project_plane_pixel_area + ) this_model_map_hpx = self._flat_sky_to_healpix_transform[energy_bin_id]( this_model_map, fill_value=0.0 ) # Now multiply by the pixel area of the new map to go back to flux - this_model_map_hpx *= hp.nside2pixarea(data_analysis_bin.nside, degrees=True) + this_model_map_hpx *= hp.nside2pixarea( + data_analysis_bin.nside, degrees=True + ) else: @@ -1117,7 +1174,9 @@ def display_fit(self, smoothing_kernel_sigma=0.1, display_colorbar=False): this_ra, this_dec = self._roi.ra_dec_center # Make a full healpix map for a second - whole_map = self._get_model_map(plane_id, n_point_sources, n_ext_sources).as_dense() + whole_map = self._get_model_map( + plane_id, n_point_sources, n_ext_sources + ).as_dense() # Healpix uses longitude between -180 and 180, while R.A. is between 0 and 360. 
We need to fix that: longitude = ra_to_longitude(this_ra) @@ -1126,7 +1185,9 @@ def display_fit(self, smoothing_kernel_sigma=0.1, display_colorbar=False): latitude = this_dec # Background and excess maps - bkg_subtracted, _, background_map = self._get_excess(data_analysis_bin, all_maps=True) + bkg_subtracted, _, background_map = self._get_excess( + data_analysis_bin, all_maps=True + ) # Make all the projections: model, excess, background, residuals proj_model = self._represent_healpix_map( @@ -1160,11 +1221,15 @@ def display_fit(self, smoothing_kernel_sigma=0.1, display_colorbar=False): vmax = max(np.nanmax(proj_model), np.nanmax(proj_data)) # Plot model - images[0] = subs[i][0].imshow(proj_model, origin="lower", vmin=vmin, vmax=vmax) + images[0] = subs[i][0].imshow( + proj_model, origin="lower", vmin=vmin, vmax=vmax + ) subs[i][0].set_title("model, bin {}".format(data_analysis_bin.name)) # Plot data map - images[1] = subs[i][1].imshow(proj_data, origin="lower", vmin=vmin, vmax=vmax) + images[1] = subs[i][1].imshow( + proj_data, origin="lower", vmin=vmin, vmax=vmax + ) subs[i][1].set_title("excess, bin {}".format(data_analysis_bin.name)) # Plot background map. @@ -1287,7 +1352,9 @@ def _get_model_map(self, plane_id, n_pt_src, n_ext_src): raise ValueError(f"{plane_id} not a plane in the current model") model_map = SparseHealpix( - self._get_expectation(self._maptree[plane_id], plane_id, n_pt_src, n_ext_src), + self._get_expectation( + self._maptree[plane_id], plane_id, n_pt_src, n_ext_src + ), self._active_pixels[plane_id], self._maptree[plane_id].observation_map.nside, ) @@ -1361,13 +1428,17 @@ def _write_a_map(self, file_name, which, fluctuate=False, return_map=False): if return_map: return new_map_tree - def write_model_map(self, file_name, poisson_fluctuate=False, test_return_map=False): + def write_model_map( + self, file_name, poisson_fluctuate=False, test_return_map=False + ): """ This function writes the model map to a file. The interface is based off of HAWCLike for consistency """ if test_return_map: - log.warning("test_return_map=True should only be used for testing purposes!") + log.warning( + "test_return_map=True should only be used for testing purposes!" + ) return self._write_a_map(file_name, "model", poisson_fluctuate, test_return_map) def write_residual_map(self, file_name, test_return_map=False): @@ -1376,5 +1447,7 @@ def write_residual_map(self, file_name, test_return_map=False): The interface is based off of HAWCLike for consistency """ if test_return_map: - log.warning("test_return_map=True should only be used for testing purposes!") + log.warning( + "test_return_map=True should only be used for testing purposes!" 
+ ) return self._write_a_map(file_name, "residual", False, test_return_map) diff --git a/hawc_hal/convenience_functions/fit_point_source.py b/hawc_hal/convenience_functions/fit_point_source.py index abb7f07..a1a44ec 100644 --- a/hawc_hal/convenience_functions/fit_point_source.py +++ b/hawc_hal/convenience_functions/fit_point_source.py @@ -3,26 +3,24 @@ from threeML import DataList, JointLikelihood -def fit_point_source(roi, - maptree, - response, - point_source_model, - bin_list, - confidence_intervals=False, - liff=False, - pixel_size=0.17, - verbose=False): +def fit_point_source( + roi, + maptree, + response, + point_source_model, + bin_list, + confidence_intervals=False, + liff=False, + pixel_size=0.17, + verbose=False, +): data_radius = roi.data_radius.to("deg").value if not liff: # This is a 3ML plugin - hawc = HAL("HAWC", - maptree, - response, - roi, - flat_sky_pixels_size=pixel_size) + hawc = HAL("HAWC", maptree, response, roi, flat_sky_pixels_size=pixel_size) hawc.set_active_measurements(bin_list=bin_list) @@ -30,10 +28,7 @@ def fit_point_source(roi, from threeML import HAWCLike - hawc = HAWCLike("HAWC", - maptree, - response, - fullsky=True) + hawc = HAWCLike("HAWC", maptree, response, fullsky=True) hawc.set_bin_list(bin_list) @@ -69,4 +64,4 @@ def fit_point_source(roi, ci = None - return param_df, like_df, ci, jl.results \ No newline at end of file + return param_df, like_df, ci, jl.results diff --git a/hawc_hal/convolved_source/__init__.py b/hawc_hal/convolved_source/__init__.py index d680135..dff1d09 100644 --- a/hawc_hal/convolved_source/__init__.py +++ b/hawc_hal/convolved_source/__init__.py @@ -1,4 +1,7 @@ from __future__ import absolute_import from .convolved_source_container import ConvolvedSourcesContainer from .convolved_point_source import ConvolvedPointSource -from .convolved_extended_source import ConvolvedExtendedSource2D, ConvolvedExtendedSource3D \ No newline at end of file +from .convolved_extended_source import ( + ConvolvedExtendedSource2D, + ConvolvedExtendedSource3D, +) diff --git a/hawc_hal/convolved_source/convolved_extended_source.py b/hawc_hal/convolved_source/convolved_extended_source.py index f76cf58..819feda 100644 --- a/hawc_hal/convolved_source/convolved_extended_source.py +++ b/hawc_hal/convolved_source/convolved_extended_source.py @@ -6,6 +6,7 @@ from astromodels import use_astromodels_memoization from threeML.io.logging import setup_logger + log = setup_logger(__name__) log.propagate = False @@ -24,12 +25,12 @@ def _select_with_wrap_around(arr, start, stop, wrap=(360, 0)): return idx + # Conversion factor between deg^2 and rad^2 deg2_to_rad2 = 0.00030461741978670857 class ConvolvedExtendedSource(object): - def __init__(self, source, response, flat_sky_projection): self._response = response @@ -43,7 +44,12 @@ def __init__(self, source, response, flat_sky_projection): # Find out the response bins we need to consider for this extended source # # Get the footprint (i..e, the coordinates of the 4 points limiting the projections) - (ra1, dec1), (ra2, dec2), (ra3, dec3), (ra4, dec4) = flat_sky_projection.wcs.calc_footprint() + ( + (ra1, dec1), + (ra2, dec2), + (ra3, dec3), + (ra4, dec4), + ) = flat_sky_projection.wcs.calc_footprint() (lon_start, lon_stop), (lat_start, lat_stop) = source.get_boundaries() @@ -56,44 +62,68 @@ def __init__(self, source, response, flat_sky_projection): upper_edges = np.array([x[-1] for x in response.dec_bins]) centers = np.array([x[1] for x in response.dec_bins]) - dec_bins_to_consider_idx = np.flatnonzero((upper_edges >= 
dec_min) & (lower_edges <= dec_max)) + dec_bins_to_consider_idx = np.flatnonzero( + (upper_edges >= dec_min) & (lower_edges <= dec_max) + ) # Wrap the selection so we have always one bin before and one after. # NOTE: we assume that the ROI do not overlap with the very first or the very last dec bin # Add one dec bin to cover the last part - dec_bins_to_consider_idx = np.append(dec_bins_to_consider_idx, [dec_bins_to_consider_idx[-1] + 1]) + dec_bins_to_consider_idx = np.append( + dec_bins_to_consider_idx, [dec_bins_to_consider_idx[-1] + 1] + ) # Add one dec bin to cover the first part - dec_bins_to_consider_idx = np.insert(dec_bins_to_consider_idx, 0, [dec_bins_to_consider_idx[0] - 1]) + dec_bins_to_consider_idx = np.insert( + dec_bins_to_consider_idx, 0, [dec_bins_to_consider_idx[0] - 1] + ) - self._dec_bins_to_consider = [response.response_bins[centers[x]] for x in dec_bins_to_consider_idx] + self._dec_bins_to_consider = [ + response.response_bins[centers[x]] for x in dec_bins_to_consider_idx + ] - log.info("Considering %i dec bins for extended source %s" % (len(self._dec_bins_to_consider), - self._name)) + log.info( + "Considering %i dec bins for extended source %s" + % (len(self._dec_bins_to_consider), self._name) + ) # Find central bin for the PSF dec_center = (lat_start + lat_stop) / 2.0 # - self._central_response_bins = response.get_response_dec_bin(dec_center, interpolate=False) + self._central_response_bins = response.get_response_dec_bin( + dec_center, interpolate=False + ) log.info("Central bin is bin at Declination = %.3f" % dec_center) # Take note of the pixels within the flat sky projection that actually need to be computed. If the extended # source is significantly smaller than the flat sky projection, this gains a substantial amount of time - idx_lon = _select_with_wrap_around(self._flat_sky_projection.ras, lon_start, lon_stop, (360, 0)) - idx_lat = _select_with_wrap_around(self._flat_sky_projection.decs, lat_start, lat_stop, (90, -90)) + idx_lon = _select_with_wrap_around( + self._flat_sky_projection.ras, lon_start, lon_stop, (360, 0) + ) + idx_lat = _select_with_wrap_around( + self._flat_sky_projection.decs, lat_start, lat_stop, (90, -90) + ) - self._active_flat_sky_mask = (idx_lon & idx_lat) + self._active_flat_sky_mask = idx_lon & idx_lat - assert np.sum(self._active_flat_sky_mask) > 0, "Mismatch between source %s and ROI" % self._name + assert np.sum(self._active_flat_sky_mask) > 0, ( + "Mismatch between source %s and ROI" % self._name + ) # Get the energies needed for the computation of the flux - self._energy_centers_keV = self._central_response_bins[list(self._central_response_bins.keys())[0]].sim_energy_bin_centers * 1e9 + self._energy_centers_keV = ( + self._central_response_bins[ + list(self._central_response_bins.keys())[0] + ].sim_energy_bin_centers + * 1e9 + ) # Prepare array for fluxes - self._all_fluxes = np.zeros((self._flat_sky_projection.ras.shape[0], - self._energy_centers_keV.shape[0])) + self._all_fluxes = np.zeros( + (self._flat_sky_projection.ras.shape[0], self._energy_centers_keV.shape[0]) + ) def _setup_callbacks(self, callback): @@ -118,10 +148,11 @@ def get_source_map(self, energy_bin_id, tag=None): class ConvolvedExtendedSource3D(ConvolvedExtendedSource): - def __init__(self, source, response, flat_sky_projection): - super(ConvolvedExtendedSource3D, self).__init__(source, response, flat_sky_projection) + super(ConvolvedExtendedSource3D, self).__init__( + source, response, flat_sky_projection + ) # We implement a caching system so that the source 
flux is evaluated only when strictly needed, # because it is the most computationally intense part otherwise. @@ -154,51 +185,76 @@ def get_source_map(self, energy_bin_id, tag=None): # Recompute the fluxes for the pixels that are covered by this extended source self._all_fluxes[self._active_flat_sky_mask, :] = self._source( - self._flat_sky_projection.ras[self._active_flat_sky_mask], - self._flat_sky_projection.decs[self._active_flat_sky_mask], - self._energy_centers_keV) # 1 / (keV cm^2 s rad^2) + self._flat_sky_projection.ras[self._active_flat_sky_mask], + self._flat_sky_projection.decs[self._active_flat_sky_mask], + self._energy_centers_keV, + ) # 1 / (keV cm^2 s rad^2) # We don't need to recompute the function anymore until a parameter changes self._recompute_flux = False # Now compute the expected signal - pixel_area_rad2 = self._flat_sky_projection.project_plane_pixel_area * deg2_to_rad2 + pixel_area_rad2 = ( + self._flat_sky_projection.project_plane_pixel_area * deg2_to_rad2 + ) this_model_image = np.zeros(self._all_fluxes.shape[0]) # Loop over the Dec bins that cover this source and compute the expected flux, interpolating between # two dec bins for each point - for dec_bin1, dec_bin2 in zip(self._dec_bins_to_consider[:-1], self._dec_bins_to_consider[1:]): + for dec_bin1, dec_bin2 in zip( + self._dec_bins_to_consider[:-1], self._dec_bins_to_consider[1:] + ): # Get the two response bins to consider this_response_bin1 = dec_bin1[energy_bin_id] this_response_bin2 = dec_bin2[energy_bin_id] # Figure out which pixels are between the centers of the dec bins we are considering - c1, c2 = this_response_bin1.declination_center, this_response_bin2.declination_center + c1, c2 = ( + this_response_bin1.declination_center, + this_response_bin2.declination_center, + ) - idx = (self._flat_sky_projection.decs >= c1) & (self._flat_sky_projection.decs < c2) & \ - self._active_flat_sky_mask + idx = ( + (self._flat_sky_projection.decs >= c1) + & (self._flat_sky_projection.decs < c2) + & self._active_flat_sky_mask + ) # Reweight the spectrum separately for the two bins # NOTE: the scale is the same because the sim_differential_photon_fluxes are the same (the simulation # used to make the response used the same spectrum for each bin). 
What changes between the two bins # is the observed signal per bin (the .sim_signal_events_per_bin member) - scale = old_div((self._all_fluxes[idx, :] * pixel_area_rad2), this_response_bin1.sim_differential_photon_fluxes) + scale = old_div( + (self._all_fluxes[idx, :] * pixel_area_rad2), + this_response_bin1.sim_differential_photon_fluxes, + ) # Compute the interpolation weights for the two responses w1 = old_div((self._flat_sky_projection.decs[idx] - c2), (c1 - c2)) w2 = old_div((self._flat_sky_projection.decs[idx] - c1), (c2 - c1)) - this_model_image[idx] = (w1 * np.sum(scale * this_response_bin1.sim_signal_events_per_bin, axis=1) + - w2 * np.sum(scale * this_response_bin2.sim_signal_events_per_bin, axis=1)) * \ - 1e9 + this_model_image[idx] = ( + w1 + * np.sum( + scale * this_response_bin1.sim_signal_events_per_bin, axis=1 + ) + + w2 + * np.sum( + scale * this_response_bin2.sim_signal_events_per_bin, axis=1 + ) + ) * 1e9 # Reshape the flux array into an image - this_model_image = this_model_image.reshape((self._flat_sky_projection.npix_height, - self._flat_sky_projection.npix_width)).T + this_model_image = this_model_image.reshape( + ( + self._flat_sky_projection.npix_height, + self._flat_sky_projection.npix_width, + ) + ).T return this_model_image diff --git a/hawc_hal/convolved_source/convolved_point_source.py b/hawc_hal/convolved_source/convolved_point_source.py index 219a640..2bcbbf0 100644 --- a/hawc_hal/convolved_source/convolved_point_source.py +++ b/hawc_hal/convolved_source/convolved_point_source.py @@ -11,12 +11,12 @@ from ..interpolation.log_log_interpolation import LogLogInterpolator from threeML.io.logging import setup_logger + log = setup_logger(__name__) log.propagate = False class ConvolvedPointSource(object): - def __init__(self, source, response, flat_sky_projection): assert isinstance(source, PointSource) @@ -48,21 +48,29 @@ def _update_dec_bins(self, dec_src): if abs(dec_src - self._last_processed_position[1]) > 0.1: # Source moved by more than 0.1 deg, let's recompute the response and the PSF - self._response_energy_bins = self._response.get_response_dec_bin(dec_src, interpolate=True) + self._response_energy_bins = self._response.get_response_dec_bin( + dec_src, interpolate=True + ) # Setup the PSF interpolators self._psf_interpolators = collections.OrderedDict() for bin_id in self._response_energy_bins: - self._psf_interpolators[bin_id] = PSFInterpolator(self._response_energy_bins[bin_id].psf, - self._flat_sky_projection) + self._psf_interpolators[bin_id] = PSFInterpolator( + self._response_energy_bins[bin_id].psf, self._flat_sky_projection + ) - def get_source_map(self, response_bin_id, tag=None, integrate=False, psf_integration_method='fast'): + def get_source_map( + self, response_bin_id, tag=None, integrate=False, psf_integration_method="fast" + ): # Get current point source position # NOTE: this might change if the point source position is free during the fit, # that's why it is here - ra_src, dec_src = self._source.position.ra.value, self._source.position.dec.value + ra_src, dec_src = ( + self._source.position.ra.value, + self._source.position.dec.value, + ) if (ra_src, dec_src) != self._last_processed_position: @@ -78,21 +86,26 @@ def get_source_map(self, response_bin_id, tag=None, integrate=False, psf_integra # Get the PSF image # This is cached inside the PSF class, so that if the position doesn't change this line # is very fast - this_map = psf_interpolator.point_source_image(ra_src, dec_src, psf_integration_method) + this_map = 
psf_interpolator.point_source_image( + ra_src, dec_src, psf_integration_method + ) # Check that the point source image is entirely contained in the ROI, if not print a warning map_sum = this_map.sum() if not np.isclose(map_sum, 1.0, rtol=1e-2): - log.warning("PSF for source %s is not entirely contained " - "in ROI for response bin %s. Fraction is %.2f instead of 1.0. " - "Consider enlarging your model ROI." % (self._name, - response_bin_id, - map_sum)) + log.warning( + "PSF for source %s is not entirely contained " + "in ROI for response bin %s. Fraction is %.2f instead of 1.0. " + "Consider enlarging your model ROI." + % (self._name, response_bin_id, map_sum) + ) # Compute the fluxes from the spectral function at the same energies as the simulated function - energy_centers_keV = response_energy_bin.sim_energy_bin_centers * 1e9 # astromodels expects energies in keV + energy_centers_keV = ( + response_energy_bin.sim_energy_bin_centers * 1e9 + ) # astromodels expects energies in keV # This call needs to be here because the parameters of the model might change, # for example during a fit @@ -100,7 +113,9 @@ def get_source_map(self, response_bin_id, tag=None, integrate=False, psf_integra source_diff_spectrum = self._source(energy_centers_keV, tag=tag) # This provide a way to force the use of integration, for testing - if os.environ.get("HAL_INTEGRATE_POINT_SOURCE", '').lower() == 'yes': # pragma: no cover + if ( + os.environ.get("HAL_INTEGRATE_POINT_SOURCE", "").lower() == "yes" + ): # pragma: no cover integrate = True @@ -110,26 +125,46 @@ def get_source_map(self, response_bin_id, tag=None, integrate=False, psf_integra # It is off by default because it is slower and it doesn't provide any improvement in accuracy # according to my simulations (GV) - interp_spectrum = LogLogInterpolator(response_energy_bin.sim_energy_bin_centers, - source_diff_spectrum * 1e9, - k=2) - - interp_sim_spectrum = LogLogInterpolator(response_energy_bin.sim_energy_bin_centers, - response_energy_bin.sim_differential_photon_fluxes, - k=2) - - src_spectrum = [interp_spectrum.integral(a, b) for (a, b) in zip(response_energy_bin.sim_energy_bin_low, - response_energy_bin.sim_energy_bin_hi)] - - sim_spectrum = [interp_sim_spectrum.integral(a, b) for (a, b) in zip(response_energy_bin.sim_energy_bin_low, - response_energy_bin.sim_energy_bin_hi)] + interp_spectrum = LogLogInterpolator( + response_energy_bin.sim_energy_bin_centers, + source_diff_spectrum * 1e9, + k=2, + ) + + interp_sim_spectrum = LogLogInterpolator( + response_energy_bin.sim_energy_bin_centers, + response_energy_bin.sim_differential_photon_fluxes, + k=2, + ) + + src_spectrum = [ + interp_spectrum.integral(a, b) + for (a, b) in zip( + response_energy_bin.sim_energy_bin_low, + response_energy_bin.sim_energy_bin_hi, + ) + ] + + sim_spectrum = [ + interp_sim_spectrum.integral(a, b) + for (a, b) in zip( + response_energy_bin.sim_energy_bin_low, + response_energy_bin.sim_energy_bin_hi, + ) + ] scale = old_div(np.array(src_spectrum), np.array(sim_spectrum)) else: # Transform from keV^-1 cm^-2 s^-1 to TeV^-1 cm^-2 s^-1 and re-weight the detected counts - scale = old_div(source_diff_spectrum, response_energy_bin.sim_differential_photon_fluxes) * 1e9 + scale = ( + old_div( + source_diff_spectrum, + response_energy_bin.sim_differential_photon_fluxes, + ) + * 1e9 + ) # Now return the map multiplied by the scale factor return np.sum(scale * response_energy_bin.sim_signal_events_per_bin) * this_map diff --git a/hawc_hal/convolved_source/convolved_source_container.py 
b/hawc_hal/convolved_source/convolved_source_container.py index 4b15b09..6095293 100644 --- a/hawc_hal/convolved_source/convolved_source_container.py +++ b/hawc_hal/convolved_source/convolved_source_container.py @@ -1,4 +1,6 @@ from builtins import object + + class ConvolvedSourcesContainer(object): def __init__(self): diff --git a/hawc_hal/flat_sky_projection.py b/hawc_hal/flat_sky_projection.py index 7b0bdd7..d9400a8 100644 --- a/hawc_hal/flat_sky_projection.py +++ b/hawc_hal/flat_sky_projection.py @@ -33,11 +33,21 @@ def _get_header(ra, dec, pixel_size, coordsys, h, w): assert 0 <= ra <= 360 - header = fits.Header.fromstring(_fits_header % (h, w, - old_div(h, 2), ra, pixel_size, - old_div(w, 2), dec, pixel_size, - coordsys), - sep='\n') + header = fits.Header.fromstring( + _fits_header + % ( + h, + w, + old_div(h, 2), + ra, + pixel_size, + old_div(w, 2), + dec, + pixel_size, + coordsys, + ), + sep="\n", + ) return header @@ -49,8 +59,7 @@ def _get_all_ra_dec(input_wcs, h, w): xx = np.arange(0.5, h + 0.5, 1, dtype=np.int16) yy = np.arange(0.5, w + 0.5, 1, dtype=np.int16) - _ij_grid = cartesian((xx, - yy)) + _ij_grid = cartesian((xx, yy)) # Convert pixel coordinates to world coordinates world = input_wcs.all_pix2world(_ij_grid, 0, ra_dec_order=True) @@ -59,7 +68,6 @@ def _get_all_ra_dec(input_wcs, h, w): class FlatSkyProjection(object): - def __init__(self, ra_center, dec_center, pixel_size_deg, npix_height, npix_width): assert npix_height % 2 == 0, "Number of height pixels must be even" @@ -86,7 +94,11 @@ def __init__(self, ra_center, dec_center, pixel_size_deg, npix_height, npix_widt # Build projection, i.e., a World Coordinate System object - self._wcs = WCS(_get_header(ra_center, dec_center, pixel_size_deg, 'icrs', npix_height, npix_width)) + self._wcs = WCS( + _get_header( + ra_center, dec_center, pixel_size_deg, "icrs", npix_height, npix_width + ) + ) # Pre-compute all R.A., Decs self._ras, self._decs = _get_all_ra_dec(self._wcs, npix_height, npix_width) @@ -240,4 +252,3 @@ def project_plane_pixel_area(self): # self._distance_cache[key] = (ds[fine_selection_idx], selection_idx) # # return self._distance_cache[key] - diff --git a/hawc_hal/healpix_handling/__init__.py b/hawc_hal/healpix_handling/__init__.py index 09df17b..1332c85 100644 --- a/hawc_hal/healpix_handling/__init__.py +++ b/hawc_hal/healpix_handling/__init__.py @@ -2,4 +2,4 @@ from .flat_sky_to_healpix import FlatSkyToHealpixTransform from .sparse_healpix import SparseHealpix, DenseHealpix from .gnomonic_projection import get_gnomonic_projection -from .healpix_utils import radec_to_vec \ No newline at end of file +from .healpix_utils import radec_to_vec diff --git a/hawc_hal/healpix_handling/flat_sky_to_healpix.py b/hawc_hal/healpix_handling/flat_sky_to_healpix.py index 4a4822e..6c508ae 100644 --- a/hawc_hal/healpix_handling/flat_sky_to_healpix.py +++ b/hawc_hal/healpix_handling/flat_sky_to_healpix.py @@ -15,16 +15,16 @@ ORDER = {} -ORDER['nearest-neighbor'] = 0 -ORDER['bilinear'] = 1 -ORDER['biquadratic'] = 2 -ORDER['bicubic'] = 3 +ORDER["nearest-neighbor"] = 0 +ORDER["bilinear"] = 1 +ORDER["biquadratic"] = 2 +ORDER["bicubic"] = 3 COORDSYS = { - 'g': Galactic(), - 'c': ICRS(), - 'icrs': ICRS(), + "g": Galactic(), + "c": ICRS(), + "icrs": ICRS(), } @@ -48,14 +48,13 @@ def _convert_world_coordinates(lon_in, lat_in, wcs_in, wcs_out): lon_out_unit = u.Unit(wcs_out.wcs.cunit[0]) lat_out_unit = u.Unit(wcs_out.wcs.cunit[1]) - data = UnitSphericalRepresentation(lon_in * lon_in_unit, - lat_in * lat_in_unit) + data = 
UnitSphericalRepresentation(lon_in * lon_in_unit, lat_in * lat_in_unit) coords_in = frame_in.realize_frame(data) coords_out = coords_in.transform_to(frame_out) - lon_out = coords_out.represent_as('unitspherical').lon.to(lon_out_unit).value - lat_out = coords_out.represent_as('unitspherical').lat.to(lat_out_unit).value + lon_out = coords_out.represent_as("unitspherical").lon.to(lon_out_unit).value + lat_out = coords_out.represent_as("unitspherical").lat.to(lat_out_unit).value return lon_out, lat_out @@ -69,20 +68,31 @@ class FlatSkyToHealpixTransform(object): the transformation. This avoids to re-compute the same quantities over and over again. """ - def __init__(self, wcs_in, coord_system_out, nside, pixels_id, input_shape, order='bilinear', nested=False): + def __init__( + self, + wcs_in, + coord_system_out, + nside, + pixels_id, + input_shape, + order="bilinear", + nested=False, + ): # Look up lon, lat of pixels in output system and convert colatitude theta # and longitude phi to longitude and latitude. theta, phi = hp.pix2ang(nside, pixels_id, nested) lon_out = np.degrees(phi) - lat_out = 90. - np.degrees(theta) + lat_out = 90.0 - np.degrees(theta) # Convert between celestial coordinates coord_system_out = _parse_coord_system(coord_system_out) - with np.errstate(invalid='ignore'): - lon_in, lat_in = _convert_world_coordinates(lon_out, lat_out, (coord_system_out, u.deg, u.deg), wcs_in) + with np.errstate(invalid="ignore"): + lon_in, lat_in = _convert_world_coordinates( + lon_out, lat_out, (coord_system_out, u.deg, u.deg), wcs_in + ) # Look up pixels in input system yinds, xinds = wcs_in.wcs_world2pix(lon_in, lat_in, 0) diff --git a/hawc_hal/healpix_handling/gnomonic_projection.py b/hawc_hal/healpix_handling/gnomonic_projection.py index c4a0fca..8e59820 100644 --- a/hawc_hal/healpix_handling/gnomonic_projection.py +++ b/hawc_hal/healpix_handling/gnomonic_projection.py @@ -16,18 +16,20 @@ def get_gnomonic_projection(figure, hpx_map, **kwargs): :return: the array containing the projection. 
""" - defaults = {'coord': 'C', - 'rot': None, - 'format': '%g', - 'flip': 'astro', - 'xsize': 200, - 'ysize': None, - 'reso': 1.5, - 'nest': False, - 'min': None, - 'max': None, - 'cmap': None, - 'norm': None} + defaults = { + "coord": "C", + "rot": None, + "format": "%g", + "flip": "astro", + "xsize": 200, + "ysize": None, + "reso": 1.5, + "nest": False, + "min": None, + "max": None, + "cmap": None, + "norm": None, + } for key, default_value in list(defaults.items()): @@ -47,26 +49,31 @@ def get_gnomonic_projection(figure, hpx_map, **kwargs): # extent[3] - margins[3] - margins[1]) extent = (0.05, 0.05, 0.9, 0.9) - ax = PA.HpxGnomonicAxes(figure, extent, - coord=kwargs['coord'], - rot=kwargs['rot'], - format=kwargs['format'], - flipconv=kwargs['flip']) + ax = PA.HpxGnomonicAxes( + figure, + extent, + coord=kwargs["coord"], + rot=kwargs["rot"], + format=kwargs["format"], + flipconv=kwargs["flip"], + ) # Suppress warnings about nans with np.warnings.catch_warnings(): - np.warnings.filterwarnings('ignore') + np.warnings.filterwarnings("ignore") - img = ax.projmap(hpx_map, - nest=kwargs['nest'], - coord=kwargs['coord'], - vmin=kwargs['min'], - vmax=kwargs['max'], - xsize=kwargs['xsize'], - ysize=kwargs['ysize'], - reso=kwargs['reso'], - cmap=kwargs['cmap'], - norm=kwargs['norm']) + img = ax.projmap( + hpx_map, + nest=kwargs["nest"], + coord=kwargs["coord"], + vmin=kwargs["min"], + vmax=kwargs["max"], + xsize=kwargs["xsize"], + ysize=kwargs["ysize"], + reso=kwargs["reso"], + cmap=kwargs["cmap"], + norm=kwargs["norm"], + ) return img diff --git a/hawc_hal/healpix_handling/healpix_utils.py b/hawc_hal/healpix_handling/healpix_utils.py index e8d552c..3a56e0d 100644 --- a/hawc_hal/healpix_handling/healpix_utils.py +++ b/hawc_hal/healpix_handling/healpix_utils.py @@ -25,4 +25,4 @@ def radec_to_vec(ra, dec): # ra = np.rad2deg(phi) # dec = np.rad2deg(0.5 * np.pi - theta) # -# return ra, dec \ No newline at end of file +# return ra, dec diff --git a/hawc_hal/healpix_handling/sparse_healpix.py b/hawc_hal/healpix_handling/sparse_healpix.py index dd4907e..f9d13c7 100644 --- a/hawc_hal/healpix_handling/sparse_healpix.py +++ b/hawc_hal/healpix_handling/sparse_healpix.py @@ -65,7 +65,6 @@ def to_pandas(self): class SparseHealpix(HealpixWrapperBase): - def __init__(self, partial_map, pixels_ids, nside, fill_value=UNSEEN): self._partial_map = partial_map @@ -82,7 +81,7 @@ def __add__(self, other_map): added = self.as_partial() + other_map.as_partial() sparse_added = SparseHealpix(added, self._pixels_ids, self.nside) - + return sparse_added def __sub__(self, other_map): @@ -127,7 +126,6 @@ def pixels_ids(self): return self._pixels_ids - class DenseHealpix(HealpixWrapperBase): """ A dense (fullsky) healpix map. In this case partial and complete are the same map. 
@@ -138,7 +136,9 @@ def __init__(self, healpix_array): self._dense_map = healpix_array - super(DenseHealpix, self).__init__(nside=hp.npix2nside(healpix_array.shape[0]), sparse=False) + super(DenseHealpix, self).__init__( + nside=hp.npix2nside(healpix_array.shape[0]), sparse=False + ) def as_dense(self): """ @@ -159,4 +159,3 @@ def set_new_values(self, new_values): assert new_values.shape == self._dense_map.shape self._dense_map[:] = new_values - diff --git a/hawc_hal/interpolation/__init__.py b/hawc_hal/interpolation/__init__.py index 50ef670..d2b5c74 100644 --- a/hawc_hal/interpolation/__init__.py +++ b/hawc_hal/interpolation/__init__.py @@ -1,3 +1,3 @@ from __future__ import absolute_import from .fast_bilinar_interpolation import FastBilinearInterpolation -from .irregular_grid import FastLinearInterpolatorIrregularGrid # pragma: no cover \ No newline at end of file +from .irregular_grid import FastLinearInterpolatorIrregularGrid # pragma: no cover diff --git a/hawc_hal/interpolation/fast_bilinar_interpolation.py b/hawc_hal/interpolation/fast_bilinar_interpolation.py index c383a68..55dbab9 100644 --- a/hawc_hal/interpolation/fast_bilinar_interpolation.py +++ b/hawc_hal/interpolation/fast_bilinar_interpolation.py @@ -28,8 +28,8 @@ def __init__(self, input_shape, new_points): @staticmethod def _find_bounding_box(xaxis, yaxis, xs, ys): # Find lower left corner of bounding box - xidx = np.searchsorted(xaxis, xs, 'left') - 1 - yidx = np.searchsorted(yaxis, ys, 'left') - 1 + xidx = np.searchsorted(xaxis, xs, "left") - 1 + yidx = np.searchsorted(yaxis, ys, "left") - 1 lower_left_x = xaxis[xidx] lower_left_y = yaxis[yidx] @@ -43,10 +43,16 @@ def _find_bounding_box(xaxis, yaxis, xs, ys): lower_right_x = xaxis[xidx + 1] lower_right_y = yaxis[yidx] - return (lower_left_x, lower_left_y, - upper_left_x, upper_left_y, - upper_right_x, upper_right_y, - lower_right_x, lower_right_y) + return ( + lower_left_x, + lower_left_y, + upper_left_x, + upper_left_y, + upper_right_x, + upper_right_y, + lower_right_x, + lower_right_y, + ) def compute_coefficients(self, p): @@ -60,7 +66,9 @@ def compute_coefficients(self, p): # # y2 q12 q22 - x1, y2, xx1, y1, x2, yy1, xx2, yy2 = self._find_bounding_box(self._gridx, self._gridy, xx, yy) + x1, y2, xx1, y1, x2, yy1, xx2, yy2 = self._find_bounding_box( + self._gridx, self._gridy, xx, yy + ) bs = np.zeros((xx.shape[0], 4), np.float64) @@ -78,10 +86,9 @@ def compute_coefficients(self, p): # Stack them so that they are in the right order, i.e.: # ul1, ur1, ll1, lr1, ul2, ur2, ll2, lr2 ... 
uln, urn, lln, lrn - flat_points = np.vstack([flat_upper_left, - flat_upper_right, - flat_lower_left, - flat_lower_right]).T.flatten() + flat_points = np.vstack( + [flat_upper_left, flat_upper_right, flat_lower_left, flat_lower_right] + ).T.flatten() return bs, flat_points.astype(np.int64) @@ -97,4 +104,4 @@ def _apply_bilinar_interpolation(bs, points, data): # pragma: no cover vs = data.ravel()[points] - return np.sum(bs * vs.reshape(bs.shape), axis=1).flatten() \ No newline at end of file + return np.sum(bs * vs.reshape(bs.shape), axis=1).flatten() diff --git a/hawc_hal/interpolation/irregular_grid.py b/hawc_hal/interpolation/irregular_grid.py index 583d955..31042fc 100644 --- a/hawc_hal/interpolation/irregular_grid.py +++ b/hawc_hal/interpolation/irregular_grid.py @@ -17,18 +17,17 @@ def interp_weights(xy, uv, d=2): # pragma: no cover delta = uv - temp[:, d] - bary = np.einsum('njk,nk->nj', temp[:, :d, :], delta) + bary = np.einsum("njk,nk->nj", temp[:, :d, :], delta) return vertices, np.hstack((bary, 1 - bary.sum(axis=1, keepdims=True))) def interpolate(values, vtx, wts): # pragma: no cover - return np.einsum('nj,nj->n', np.take(values, vtx), wts) + return np.einsum("nj,nj->n", np.take(values, vtx), wts) class FastLinearInterpolatorIrregularGrid(object): # pragma: no cover - def __init__(self, data_shape, new_coords): old_coords = cartesian([np.arange(data_shape[0]), np.arange(data_shape[1])]) @@ -37,4 +36,4 @@ def __init__(self, data_shape, new_coords): def __call__(self, data): - return interpolate(data, self._vtx, self._wts) \ No newline at end of file + return interpolate(data, self._vtx, self._wts) diff --git a/hawc_hal/interpolation/log_log_interpolation.py b/hawc_hal/interpolation/log_log_interpolation.py index 146fd25..0d01f2f 100644 --- a/hawc_hal/interpolation/log_log_interpolation.py +++ b/hawc_hal/interpolation/log_log_interpolation.py @@ -6,7 +6,6 @@ class LogLogInterpolator(object): # pragma: no cover - def __init__(self, x, y, k=2): y = y.astype(np.float64) @@ -19,7 +18,7 @@ def __init__(self, x, y, k=2): def __call__(self, x): - return 10**self._interp(log10(x)) + return 10 ** self._interp(log10(x)) def integral(self, a, b, n_points=100, k=1): @@ -30,4 +29,4 @@ def integral(self, a, b, n_points=100, k=1): int_interp = scipy.interpolate.InterpolatedUnivariateSpline(xx, yy, k=k) - return int_interp.integral(a, b) \ No newline at end of file + return int_interp.integral(a, b) diff --git a/hawc_hal/log_likelihood.py b/hawc_hal/log_likelihood.py index 80b7ad3..955020e 100644 --- a/hawc_hal/log_likelihood.py +++ b/hawc_hal/log_likelihood.py @@ -4,9 +4,17 @@ # This function has two signatures in numba because if there are no sources in the likelihood model, # then expected_model_counts is 0.0 -@jit(["float64(float64[:], float64[:], float64[:])", "float64(float64[:], float64[:], float64)"], - nopython=True, parallel=False) -def log_likelihood(observed_counts, expected_bkg_counts, expected_model_counts): # pragma: no cover +@jit( + [ + "float64(float64[:], float64[:], float64[:])", + "float64(float64[:], float64[:], float64)", + ], + nopython=True, + parallel=False, +) +def log_likelihood( + observed_counts, expected_bkg_counts, expected_model_counts +): # pragma: no cover """ Poisson log-likelihood minus log factorial minus bias. 
The bias migth be needed to keep the numerical value of the likelihood small enough so that there aren't numerical problems when computing differences between two @@ -26,4 +34,4 @@ def log_likelihood(observed_counts, expected_bkg_counts, expected_model_counts): log_likes = observed_counts * np.log(predicted_counts) - predicted_counts - return np.sum(log_likes) \ No newline at end of file + return np.sum(log_likes) diff --git a/hawc_hal/maptree/data_analysis_bin.py b/hawc_hal/maptree/data_analysis_bin.py index d98717e..56da6b4 100644 --- a/hawc_hal/maptree/data_analysis_bin.py +++ b/hawc_hal/maptree/data_analysis_bin.py @@ -4,16 +4,27 @@ class DataAnalysisBin(object): - - def __init__(self, name, observation_hpx_map, background_hpx_map, active_pixels_ids, n_transits, scheme='RING'): + def __init__( + self, + name, + observation_hpx_map, + background_hpx_map, + active_pixels_ids, + n_transits, + scheme="RING", + ): # Get nside self._nside = observation_hpx_map.nside nside_bkg = background_hpx_map.nside - assert self._nside == nside_bkg, "Observation and background maps have " \ - "different nside (%i vs %i)" % (self._nside, nside_bkg) + assert ( + self._nside == nside_bkg + ), "Observation and background maps have " "different nside (%i vs %i)" % ( + self._nside, + nside_bkg, + ) self._npix = observation_hpx_map.npix @@ -28,7 +39,7 @@ def __init__(self, name, observation_hpx_map, background_hpx_map, active_pixels_ # Store name and scheme self._name = str(name) - assert scheme in ['RING', 'NEST'], "Scheme must be either RING or NEST" + assert scheme in ["RING", "NEST"], "Scheme must be either RING or NEST" self._scheme = scheme @@ -37,17 +48,23 @@ def __init__(self, name, observation_hpx_map, background_hpx_map, active_pixels_ def to_pandas(self): # Make a dataframe - df = pd.DataFrame.from_dict({'observation': self._observation_hpx_map.to_pandas(), - 'background': self._background_hpx_map.to_pandas()}) + df = pd.DataFrame.from_dict( + { + "observation": self._observation_hpx_map.to_pandas(), + "background": self._background_hpx_map.to_pandas(), + } + ) if self._active_pixels_ids is not None: # We are saving only a subset df.set_index(self._active_pixels_ids, inplace=True) # Some metadata - meta = {'scheme': 0 if self._scheme == 'RING' else 1, - 'n_transits': self._n_transits, - 'nside': self._nside} + meta = { + "scheme": 0 if self._scheme == "RING" else 1, + "n_transits": self._n_transits, + "nside": self._nside, + } return df, meta diff --git a/hawc_hal/maptree/map_tree.py b/hawc_hal/maptree/map_tree.py index 071857f..421914a 100644 --- a/hawc_hal/maptree/map_tree.py +++ b/hawc_hal/maptree/map_tree.py @@ -125,8 +125,12 @@ def display(self): df = pd.DataFrame() df["Bin"] = list(self._analysis_bins.keys()) - df["Nside"] = [self._analysis_bins[bin_id].nside for bin_id in self._analysis_bins] - df["Scheme"] = [self._analysis_bins[bin_id].scheme for bin_id in self._analysis_bins] + df["Nside"] = [ + self._analysis_bins[bin_id].nside for bin_id in self._analysis_bins + ] + df["Scheme"] = [ + self._analysis_bins[bin_id].scheme for bin_id in self._analysis_bins + ] # Compute observed counts, background counts, how many pixels we have in the ROI and # the sky area they cover @@ -193,9 +197,9 @@ def write(self, filename): analysis_bin = self._analysis_bins[bin_id] - assert bin_id == analysis_bin.name, "Bin name inconsistency: {} != {}".format( - bin_id, analysis_bin.name - ) + assert ( + bin_id == analysis_bin.name + ), "Bin name inconsistency: {} != {}".format(bin_id, analysis_bin.name) 
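# A minimal sketch of the metadata encoding used by DataAnalysisBin.to_pandas()
# above, where the pixelization scheme is stored as an integer (0 for RING,
# 1 for NEST) alongside n_transits and nside. Not part of the diff itself;
# the values below are made up.
meta = {"scheme": 0, "n_transits": 1024.5, "nside": 1024}
scheme = "RING" if meta["scheme"] == 0 else "NEST"
assert scheme in ("RING", "NEST")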
multi_index_keys.append(analysis_bin.name) @@ -223,7 +227,9 @@ def write(self, filename): else: - serializer.store_pandas_object("/ROI", pd.Series(), **self._roi.to_dict()) + serializer.store_pandas_object( + "/ROI", pd.Series(), **self._roi.to_dict() + ) else: diff --git a/hawc_hal/obsolete/fast_convolution.py b/hawc_hal/obsolete/fast_convolution.py index 282541c..e037a7f 100644 --- a/hawc_hal/obsolete/fast_convolution.py +++ b/hawc_hal/obsolete/fast_convolution.py @@ -19,10 +19,10 @@ def _next_regular(target): return target # Quickly check if it's already a power of 2 - if not (target & (target-1)): + if not (target & (target - 1)): return target - match = float('inf') # Anything found will be smaller + match = float("inf") # Anything found will be smaller p5 = 1 while p5 < target: p35 = p5 @@ -33,10 +33,10 @@ def _next_regular(target): # Quickly find next power of 2 >= quotient try: - p2 = 2**((quotient - 1).bit_length()) + p2 = 2 ** ((quotient - 1).bit_length()) except AttributeError: # Fallback for Python <2.7 - p2 = 2**(len(bin(quotient - 1)) - 2) + p2 = 2 ** (len(bin(quotient - 1)) - 2) N = p2 * p35 if N == target: @@ -67,13 +67,11 @@ def _centered(arr, newsize): class FastConvolution(object): - def __init__(self, ras, decs, nhpix, nwpix, psf_instance): # Make the PSF image psf_image = np.zeros((nhpix, nwpix)) - self._ras = ras self._decs = decs @@ -108,9 +106,13 @@ def setup(self, expected_shape): def __call__(self, image): - assert alltrue(image.shape == self._expected_shape), "Shape of image to be convolved is not correct. Re-run setup." + assert alltrue( + image.shape == self._expected_shape + ), "Shape of image to be convolved is not correct. Re-run setup." - ret = irfftn(rfftn(image, self._fshape) * self._psf_fft, self._fshape)[self._fslice].copy() + ret = irfftn(rfftn(image, self._fshape) * self._psf_fft, self._fshape)[ + self._fslice + ].copy() return _centered(ret, self._expected_shape) @@ -130,12 +132,16 @@ def brute_force_convolution(image, kernel, output): half_size_minus_one = (kernel_size - 1) // 2 - for i in range(half_size_minus_one, h-half_size_minus_one): + for i in range(half_size_minus_one, h - half_size_minus_one): - for j in range(half_size_minus_one, w-half_size_minus_one): + for j in range(half_size_minus_one, w - half_size_minus_one): - output[j, i] = np.sum(image[j - half_size_minus_one : j + half_size_minus_one + 1, - i - half_size_minus_one : i + half_size_minus_one + 1] - * kernel) + output[j, i] = np.sum( + image[ + j - half_size_minus_one : j + half_size_minus_one + 1, + i - half_size_minus_one : i + half_size_minus_one + 1, + ] + * kernel + ) - return output \ No newline at end of file + return output diff --git a/hawc_hal/obsolete/image_to_healpix.py b/hawc_hal/obsolete/image_to_healpix.py index 0472f8f..e8e1782 100644 --- a/hawc_hal/obsolete/image_to_healpix.py +++ b/hawc_hal/obsolete/image_to_healpix.py @@ -1,4 +1,8 @@ -from hawc_hal.healpix_handling.flat_sky_to_healpix import _parse_coord_system, _convert_world_coordinates, ORDER +from hawc_hal.healpix_handling.flat_sky_to_healpix import ( + _parse_coord_system, + _convert_world_coordinates, + ORDER, +) from hawc_hal.special_values import UNSEEN import healpy as hp import numpy as np @@ -7,9 +11,19 @@ from astropy import units as u -def image_to_healpix(data, wcs_in, coord_system_out, - nside, pixels_id, order='bilinear', nested=False, - fill_value=UNSEEN, pixels_to_be_zeroed=None, full=False): + +def image_to_healpix( + data, + wcs_in, + coord_system_out, + nside, + pixels_id, + order="bilinear", 
+ nested=False, + fill_value=UNSEEN, + pixels_to_be_zeroed=None, + full=False, +): npix = hp.nside2npix(nside) @@ -18,13 +32,15 @@ def image_to_healpix(data, wcs_in, coord_system_out, theta, phi = hp.pix2ang(nside, pixels_id, nested) lon_out = np.degrees(phi) - lat_out = 90. - np.degrees(theta) + lat_out = 90.0 - np.degrees(theta) # Convert between celestial coordinates coord_system_out = _parse_coord_system(coord_system_out) - with np.errstate(invalid='ignore'): - lon_in, lat_in = _convert_world_coordinates(lon_out, lat_out, (coord_system_out, u.deg, u.deg), wcs_in) + with np.errstate(invalid="ignore"): + lon_in, lat_in = _convert_world_coordinates( + lon_out, lat_out, (coord_system_out, u.deg, u.deg), wcs_in + ) # Look up pixels in input system yinds, xinds = wcs_in.wcs_world2pix(lon_in, lat_in, 0) @@ -34,9 +50,9 @@ def image_to_healpix(data, wcs_in, coord_system_out, if isinstance(order, six.string_types): order = ORDER[order] - healpix_data_ = map_coordinates(data, [xinds, yinds], - order=order, - mode='constant', cval=fill_value) + healpix_data_ = map_coordinates( + data, [xinds, yinds], order=order, mode="constant", cval=fill_value + ) if not full: @@ -53,8 +69,10 @@ def image_to_healpix(data, wcs_in, coord_system_out, if pixels_to_be_zeroed is not None: - healpix_data[pixels_to_be_zeroed] = np.where(np.isnan(healpix_data[pixels_to_be_zeroed]), - 0.0, - healpix_data[pixels_to_be_zeroed]) + healpix_data[pixels_to_be_zeroed] = np.where( + np.isnan(healpix_data[pixels_to_be_zeroed]), + 0.0, + healpix_data[pixels_to_be_zeroed], + ) return healpix_data diff --git a/hawc_hal/obsolete/tf1_wrapper.py b/hawc_hal/obsolete/tf1_wrapper.py index 08cfaa4..4700880 100644 --- a/hawc_hal/obsolete/tf1_wrapper.py +++ b/hawc_hal/obsolete/tf1_wrapper.py @@ -1,6 +1,7 @@ from builtins import object -class TF1Wrapper(object): + +class TF1Wrapper(object): def __init__(self, tf1_instance): # Make a copy so that if the passed instance was a pointer from a TFile, # it will survive the closing of the associated TFile @@ -15,4 +16,4 @@ def integral(self, *args, **kwargs): return self._tf1.Integral(*args, **kwargs) def __call__(self, *args, **kwargs): - return self._tf1.Eval(*args, **kwargs) \ No newline at end of file + return self._tf1.Eval(*args, **kwargs) diff --git a/hawc_hal/obsolete/ts_map.py b/hawc_hal/obsolete/ts_map.py index 6691633..288b59c 100644 --- a/hawc_hal/obsolete/ts_map.py +++ b/hawc_hal/obsolete/ts_map.py @@ -6,6 +6,7 @@ import numpy as np from threeML.io.logging import setup_logger + log = setup_logger(__name__) log.propagate = False @@ -18,7 +19,10 @@ import matplotlib.pyplot as plt -from threeML.parallel.parallel_client import ParallelClient, is_parallel_computation_active +from threeML.parallel.parallel_client import ( + ParallelClient, + is_parallel_computation_active, +) from tqdm.auto import tqdm from threeML.io.fits_file import FITSFile @@ -26,203 +30,228 @@ class ParallelTSmap(object): - - def __init__(self, maptree, response, ra_c, dec_c, xsize, ysize, pix_scale, projection = 'AIT', roi_radius=3.0): + def __init__( + self, + maptree, + response, + ra_c, + dec_c, + xsize, + ysize, + pix_scale, + projection="AIT", + roi_radius=3.0, + ): # Create a new WCS object so that we can compute the appropriare R.A. 
and Dec # where we need to compute the TS self._wcs = wcs.WCS(naxis=2) - + # The +1 is because the CRPIX should be in FORTRAN indexing, which starts at +1 self._wcs.wcs.crpix = [xsize / 2.0, ysize / 2.0] self._wcs.wcs.cdelt = [-pix_scale, pix_scale] self._wcs.wcs.crval = [ra_c, dec_c] self._wcs.wcs.ctype = ["RA---%s" % projection, "DEC--%s" % projection] - + self._ra_c = ra_c self._dec_c = dec_c - + self._mtfile = maptree self._rsfile = response - + self._points = [] - + # It is important that dec is the first one because the PSF for a Dec bin_name # is cached within one engine - + max_d = 0 - + for idec in range(ysize): - + for ira in range(xsize): - - this_ra, this_dec = self._wcs.wcs_pix2world(ira, idec, 0) - - self._points.append((this_ra, this_dec)) - - d = angular_separation(*np.deg2rad((this_ra, this_dec, ra_c, dec_c))) - - if d > max_d: - - max_d = d - + + this_ra, this_dec = self._wcs.wcs_pix2world(ira, idec, 0) + + self._points.append((this_ra, this_dec)) + + d = angular_separation(*np.deg2rad((this_ra, this_dec, ra_c, dec_c))) + + if d > max_d: + + max_d = d + log.info("Maximum distance from center: %.3f deg" % np.rad2deg(max_d)) - + # We keep track of how many ras we have so that when running in parallel all # the ras will run on the same engine with the same dec, maximizing the use # of the cache and minimizing the memory footprint - + self._n_ras = xsize self._n_decs = ysize - + self._roi_radius = float(roi_radius) roi = HealpixConeROI(self._roi_radius, ra=ra_c, dec=dec_c) - + self._llh = HAL("HAWC", self._mtfile, self._rsfile, roi) - self._llh.set_active_measurements(1,9) - + self._llh.set_active_measurements(1, 9) + # Make a fit with no source to get the likelihood for the null hypothesis model = self.get_model(0) - model.TestSource.spectrum.main.shape.K = model.TestSource.spectrum.main.shape.K.min_value - + model.TestSource.spectrum.main.shape.K = ( + model.TestSource.spectrum.main.shape.K.min_value + ) + self._llh.set_model(model) - + self._like0 = self._llh.get_log_like() - + # We will fill this with the maximum of the TS map - + self._max_ts = None - + def get_data(self, interval_id): - + datalist = DataList(self._llh) - + return datalist - + def get_model(self, interval_id): - + spectrum = Powerlaw() - + this_ra = self._points[interval_id][0] this_dec = self._points[interval_id][1] - - this_source = PointSource("TestSource", ra=this_ra, dec=this_dec, spectral_shape=spectrum) - + + this_source = PointSource( + "TestSource", ra=this_ra, dec=this_dec, spectral_shape=spectrum + ) + spectrum.piv = 1 * u.TeV spectrum.piv.fix = True - + # Start from a flux 1/10 of the Crab spectrum.K = crab_diff_flux_at_1_TeV spectrum.K.bounds = (1e-30, 1e-18) spectrum.index = -2.63 spectrum.index.fix = False - + model1 = Model(this_source) - + return model1 - + def worker(self, interval_id): - + model = self.get_model(interval_id) data = self.get_data(interval_id) - + jl = JointLikelihood(model, data) jl.set_minimizer("ROOT") par, like = jl.fit(quiet=True) - - return like['-log(likelihood)']['HAWC'] - + return like["-log(likelihood)"]["HAWC"] + def go(self): - + if is_parallel_computation_active(): - + client = ParallelClient() - + if self._n_decs % client.get_number_of_engines() != 0: - - log.warning("The number of Dec bands is not a multiple of the number of engine. 
Make it so for optimal performances.", RuntimeWarning)
-
-            res = client.execute_with_progress_bar(self.worker, list(range(len(self._points))), chunk_size=self._n_ras)
-
+
+                log.warning(
+                    "The number of Dec bands is not a multiple of the number of engines. Make it so for optimal performance."
+                )
+
+            res = client.execute_with_progress_bar(
+                self.worker, list(range(len(self._points))), chunk_size=self._n_ras
+            )
+
         else:
-
+
             n_points = len(self._points)
-
+
             p = tqdm(total=n_points)
-
+
             res = np.zeros(n_points)
-
+
             for i, point in enumerate(self._points):
 
                 res[i] = self.worker(i)
                 p.update(1)
-
+
         TS = 2 * (-np.array(res) - self._like0)
-
-        #self._debug_map = {k:v for v,k in zip(self._points, TS)}
-
+
+        # self._debug_map = {k:v for v,k in zip(self._points, TS)}
+
         # Get maximum of TS
         idx = TS.argmax()
 
         self._max_ts = (TS[idx], self._points[idx])
-
-        log.info("Maximum TS is %.2f at (R.A., Dec) = (%.3f, %.3f)" % (self._max_ts[0], self._max_ts[1][0], self._max_ts[1][1]))
-
+
+        log.info(
+            "Maximum TS is %.2f at (R.A., Dec) = (%.3f, %.3f)"
+            % (self._max_ts[0], self._max_ts[1][0], self._max_ts[1][1])
+        )
+
         self._ts_map = TS.reshape(self._n_decs, self._n_ras)
-
+
         return self._ts_map
-
+
     @property
     def maximum_of_map(self):
-
+
         return self._max_ts
-
+
     def to_fits(self, filename, overwrite=True):
-
+
         primary_hdu = pyfits.PrimaryHDU(data=self._ts_map, header=self._wcs.to_header())
-
+
         primary_hdu.writeto(filename, overwrite=overwrite)
-
+
     def plot(self):
-
+
         # Draw TS map
-
-        fig, ax = plt.subplots(subplot_kw = {'projection': self._wcs})
-        plt.imshow(self._ts_map, origin='lower', interpolation='none')
-
+        fig, ax = plt.subplots(subplot_kw={"projection": self._wcs})
+
+        plt.imshow(self._ts_map, origin="lower", interpolation="none")
+
         # Draw colorbar
-
+
         cbar = plt.colorbar(format="%.1f")
-
+
         cbar.set_label("TS")
-
+
         # Plot 1, 2 and 3 sigma contour
-        _ = plt.contour(self._ts_map, origin='lower',
-                        levels=self._ts_map.max() - np.array([4.605, 7.378, 9.210][::-1]),
-                        colors=['black', 'blue','red'])
-
+        _ = plt.contour(
+            self._ts_map,
+            origin="lower",
+            levels=self._ts_map.max() - np.array([4.605, 7.378, 9.210][::-1]),
+            colors=["black", "blue", "red"],
+        )
+
         # Access the first WCS coordinate
         ra = ax.coords[0]
         dec = ax.coords[1]
-
+
         # Set the format of the tick labels
-        ra.set_major_formatter('d.ddd')
-        dec.set_major_formatter('d.ddd')
-
-        plt.xlabel('R.A. (J2000)')
-        plt.ylabel('Dec. (J2000)')
-
+        ra.set_major_formatter("d.ddd")
+        dec.set_major_formatter("d.ddd")
+
+        plt.xlabel("R.A. (J2000)")
+        plt.ylabel("Dec. 
(J2000)") + # Overlay the center - - ax.scatter([self._ra_c],[self._dec_c], transform=ax.get_transform('world'), marker='o') - + + ax.scatter( + [self._ra_c], [self._dec_c], transform=ax.get_transform("world"), marker="o" + ) + # Overlay the maximum TS ra_max, dec_max = self._max_ts[1] - - ax.scatter([ra_max],[dec_max], transform=ax.get_transform('world'), marker='x') - - return fig + ax.scatter([ra_max], [dec_max], transform=ax.get_transform("world"), marker="x") + + return fig diff --git a/hawc_hal/psf_fast/__init__.py b/hawc_hal/psf_fast/__init__.py index 60f1343..bcf4043 100644 --- a/hawc_hal/psf_fast/__init__.py +++ b/hawc_hal/psf_fast/__init__.py @@ -1,4 +1,4 @@ from __future__ import absolute_import from .psf_wrapper import PSFWrapper, InvalidPSF, InvalidPSFError from .psf_interpolator import PSFInterpolator -from .psf_convolutor import PSFConvolutor \ No newline at end of file +from .psf_convolutor import PSFConvolutor diff --git a/hawc_hal/psf_fast/psf_convolutor.py b/hawc_hal/psf_fast/psf_convolutor.py index 39f9eb2..c759c62 100644 --- a/hawc_hal/psf_fast/psf_convolutor.py +++ b/hawc_hal/psf_fast/psf_convolutor.py @@ -5,7 +5,8 @@ from past.utils import old_div import numpy as np from numpy.fft import rfftn, irfftn -#from scipy.signal import fftconvolve + +# from scipy.signal import fftconvolve from scipy.fftpack import helper @@ -14,7 +15,6 @@ class PSFConvolutor(object): - def __init__(self, psf_wrapper, flat_sky_proj): self._psf = psf_wrapper # type: PSFWrapper @@ -22,22 +22,30 @@ def __init__(self, psf_wrapper, flat_sky_proj): # Compute an image of the PSF on the current defined flat sky projection interpolator = PSFInterpolator(psf_wrapper, flat_sky_proj) - psf_stamp = interpolator.point_source_image(flat_sky_proj.ra_center, flat_sky_proj.dec_center) + psf_stamp = interpolator.point_source_image( + flat_sky_proj.ra_center, flat_sky_proj.dec_center + ) # Crop the kernel at the appropriate radius for this PSF (making sure is divisible by 2) - kernel_radius_px = int(np.ceil(old_div(self._psf.kernel_radius, flat_sky_proj.pixel_size))) + kernel_radius_px = int( + np.ceil(old_div(self._psf.kernel_radius, flat_sky_proj.pixel_size)) + ) pixels_to_keep = kernel_radius_px * 2 - assert pixels_to_keep <= psf_stamp.shape[0] and \ - pixels_to_keep <= psf_stamp.shape[1], \ - "The kernel is too large with respect to the model image. Enlarge your model radius." + assert ( + pixels_to_keep <= psf_stamp.shape[0] + and pixels_to_keep <= psf_stamp.shape[1] + ), "The kernel is too large with respect to the model image. Enlarge your model radius." xoff = (psf_stamp.shape[0] - pixels_to_keep) // 2 yoff = (psf_stamp.shape[1] - pixels_to_keep) // 2 self._kernel = psf_stamp[yoff:-yoff, xoff:-xoff] - assert np.isclose(self._kernel.sum(), 1.0, rtol=1e-2), "Failed to generate proper kernel normalization: got _kernel.sum() = %f; expected 1.0+-0.01." % self._kernel.sum() + assert np.isclose(self._kernel.sum(), 1.0, rtol=1e-2), ( + "Failed to generate proper kernel normalization: got _kernel.sum() = %f; expected 1.0+-0.01." + % self._kernel.sum() + ) # Renormalize to exactly 1 self._kernel = old_div(self._kernel, self._kernel.sum()) @@ -68,9 +76,13 @@ def extended_source_image(self, ideal_image): # Convolve - assert np.alltrue(ideal_image.shape == self._expected_shape), "Shape of image to be convolved is not correct." + assert np.alltrue( + ideal_image.shape == self._expected_shape + ), "Shape of image to be convolved is not correct." 
- ret = irfftn(rfftn(ideal_image, self._fshape) * self._psf_fft, self._fshape)[self._fslice].copy() + ret = irfftn(rfftn(ideal_image, self._fshape) * self._psf_fft, self._fshape)[ + self._fslice + ].copy() conv = _centered(ret, self._expected_shape) diff --git a/hawc_hal/psf_fast/psf_interpolator.py b/hawc_hal/psf_fast/psf_interpolator.py index 7055637..f1a6e4c 100644 --- a/hawc_hal/psf_fast/psf_interpolator.py +++ b/hawc_hal/psf_fast/psf_interpolator.py @@ -17,13 +17,14 @@ def _divide_in_blocks(arr, nrows, ncols): each subblock preserving the "physical" layout of arr. """ h, w = arr.shape - return (arr.reshape(h // nrows, nrows, -1, ncols) - .swapaxes(1, 2) - .reshape(-1, nrows, ncols)) + return ( + arr.reshape(h // nrows, nrows, -1, ncols) + .swapaxes(1, 2) + .reshape(-1, nrows, ncols) + ) class PSFInterpolator(object): - def __init__(self, psf_wrapper, flat_sky_proj): self._psf = psf_wrapper @@ -39,32 +40,41 @@ def _get_point_source_image_aitoff(self, ra, dec, psf_integration_method): # First we obtain an image with a flat sky projection centered exactly on the point source ancillary_image_pixel_size = 0.05 - pixel_side = 2 * int(np.ceil(old_div(self._psf.truncation_radius, ancillary_image_pixel_size))) - ancillary_flat_sky_proj = flat_sky_projection.FlatSkyProjection(ra, dec, ancillary_image_pixel_size, - pixel_side, pixel_side) + pixel_side = 2 * int( + np.ceil(old_div(self._psf.truncation_radius, ancillary_image_pixel_size)) + ) + ancillary_flat_sky_proj = flat_sky_projection.FlatSkyProjection( + ra, dec, ancillary_image_pixel_size, pixel_side, pixel_side + ) # Now we compute the angular distance of all pixels in the ancillary image with respect to the center - angular_distances = sphere_dist(ra, dec, ancillary_flat_sky_proj.ras, ancillary_flat_sky_proj.decs) + angular_distances = sphere_dist( + ra, dec, ancillary_flat_sky_proj.ras, ancillary_flat_sky_proj.decs + ) # Compute the brightness (i.e., the differential PSF) - ancillary_brightness = self._psf.brightness(angular_distances).reshape((pixel_side, pixel_side)) * \ - self._flat_sky_p.project_plane_pixel_area + ancillary_brightness = ( + self._psf.brightness(angular_distances).reshape((pixel_side, pixel_side)) + * self._flat_sky_p.project_plane_pixel_area + ) # Now reproject this brightness on the new image - if psf_integration_method == 'exact': + if psf_integration_method == "exact": reprojection_method = reproject.reproject_exact - additional_keywords = {'parallel': False} + additional_keywords = {"parallel": False} else: reprojection_method = reproject.reproject_interp additional_keywords = {} - brightness, _ = reprojection_method((ancillary_brightness, ancillary_flat_sky_proj.wcs), - self._flat_sky_p.wcs, shape_out=(self._flat_sky_p.npix_height, - self._flat_sky_p.npix_width), - **additional_keywords) + brightness, _ = reprojection_method( + (ancillary_brightness, ancillary_flat_sky_proj.wcs), + self._flat_sky_p.wcs, + shape_out=(self._flat_sky_p.npix_height, self._flat_sky_p.npix_width), + **additional_keywords + ) brightness[np.isnan(brightness)] = 0.0 @@ -120,7 +130,7 @@ def _get_point_source_image_aitoff(self, ra, dec, psf_integration_method): return point_source_img_ait - def point_source_image(self, ra_src, dec_src, psf_integration_method='exact'): + def point_source_image(self, ra_src, dec_src, psf_integration_method="exact"): # Make a unique key for this request key = (ra_src, dec_src, psf_integration_method) @@ -132,7 +142,9 @@ def point_source_image(self, ra_src, dec_src, psf_integration_method='exact'): else: - 
point_source_img_ait = self._get_point_source_image_aitoff(ra_src, dec_src, psf_integration_method)
+            point_source_img_ait = self._get_point_source_image_aitoff(
+                ra_src, dec_src, psf_integration_method
+            )
 
         # Store for future use
 
diff --git a/hawc_hal/psf_fast/psf_wrapper.py b/hawc_hal/psf_fast/psf_wrapper.py
index 1279e5d..762bccc 100644
--- a/hawc_hal/psf_fast/psf_wrapper.py
+++ b/hawc_hal/psf_fast/psf_wrapper.py
@@ -30,7 +30,9 @@ def __init__(self, xs, ys, brightness_interp_x=None, brightness_interp_y=None):
 
         # Memorize the total integral (will use it for normalization)
 
-        self._total_integral = self._psf_interpolated.integral(self._xs[0], _INTEGRAL_OUTER_RADIUS)
+        self._total_integral = self._psf_interpolated.integral(
+            self._xs[0], _INTEGRAL_OUTER_RADIUS
+        )
 
         # Now compute the truncation radius, which is a very conservative measurement
         # of the size of the PSF
@@ -90,7 +92,9 @@ def find_eef_radius(self, fraction):
 
         f = lambda r: fraction - old_div(self.integral(1e-4, r), self._total_integral)
 
-        radius, status = scipy.optimize.brentq(f, 0.005, _INTEGRAL_OUTER_RADIUS, full_output=True)
+        radius, status = scipy.optimize.brentq(
+            f, 0.005, _INTEGRAL_OUTER_RADIUS, full_output=True
+        )
 
         assert status.converged, "Brentq did not converge"
 
@@ -131,7 +135,9 @@ def combine_with_other_psf(self, other_psf, w1, w2):
         new_ys = w1 * self.ys + w2 * other_psf.ys
 
         # Also weight the brightness interpolation points
-        new_br_interp_y = w1 * self._brightness_interp_y + w2 * other_psf._brightness_interp_y
+        new_br_interp_y = (
+            w1 * self._brightness_interp_y + w2 * other_psf._brightness_interp_y
+        )
 
         return PSFWrapper(
             self.xs,
diff --git a/hawc_hal/region_of_interest/__init__.py b/hawc_hal/region_of_interest/__init__.py
index 2bb7871..3a779cb 100644
--- a/hawc_hal/region_of_interest/__init__.py
+++ b/hawc_hal/region_of_interest/__init__.py
@@ -10,6 +10,6 @@ def get_roi_from_dict(dictionary):
     :return:
     """
 
-    roi_type = dictionary['ROI type']
+    roi_type = dictionary["ROI type"]
 
     return globals()[roi_type].from_dict(dictionary)
diff --git a/hawc_hal/region_of_interest/healpix_cone_roi.py b/hawc_hal/region_of_interest/healpix_cone_roi.py
index a60d0e1..196f89e 100644
--- a/hawc_hal/region_of_interest/healpix_cone_roi.py
+++ b/hawc_hal/region_of_interest/healpix_cone_roi.py
@@ -12,9 +12,11 @@
 from ..flat_sky_projection import FlatSkyProjection
 
 from threeML.io.logging import setup_logger
+
 log = setup_logger(__name__)
 log.propagate = False
 
+
 def _get_radians(my_angle):
 
     if isinstance(my_angle, u.Quantity):
@@ -29,7 +31,6 @@ def _get_radians(my_angle):
 
 
 class HealpixConeROI(HealpixROIBase):
-
     def __init__(self, data_radius, model_radius, *args, **kwargs):
         """
         A cone Region of Interest defined by a center and a radius.
@@ -61,24 +62,38 @@ def to_dict(self):
 
         ra, dec = self.ra_dec_center
 
-        s = {'ROI type': type(self).__name__.split(".")[-1],
-             'ra': ra,
-             'dec': dec,
-             'data_radius_deg': np.rad2deg(self._data_radius_radians),
-             'model_radius_deg': np.rad2deg(self._model_radius_radians)}
+        s = {
+            "ROI type": type(self).__name__.split(".")[-1],
+            "ra": ra,
+            "dec": dec,
+            "data_radius_deg": np.rad2deg(self._data_radius_radians),
+            "model_radius_deg": np.rad2deg(self._model_radius_radians),
+        }
 
         return s
 
     @classmethod
     def from_dict(cls, data):
 
-        return cls(data['data_radius_deg'], data['model_radius_deg'], ra=data['ra'], dec=data['dec'])
+        return cls(
+            data["data_radius_deg"],
+            data["model_radius_deg"],
+            ra=data["ra"],
+            dec=data["dec"],
+        )
 
     def __str__(self):
 
-        s = ("%s: Center (R.A., Dec) = (%.3f, %.3f), data radius = %.3f deg, model radius: %.3f deg" %
-             (type(self).__name__, self.ra_dec_center[0], self.ra_dec_center[1],
-              self.data_radius.to(u.deg).value, self.model_radius.to(u.deg).value))
+        s = (
+            "%s: Center (R.A., Dec) = (%.3f, %.3f), data radius = %.3f deg, model radius: %.3f deg"
+            % (
+                type(self).__name__,
+                self.ra_dec_center[0],
+                self.ra_dec_center[1],
+                self.data_radius.to(u.deg).value,
+                self.model_radius.to(u.deg).value,
+            )
+        )
 
         return s
 
@@ -120,7 +135,9 @@ def _active_pixels(self, nside, ordering):
 
         nest = ordering is _NESTED
 
-        pixels_inside_cone = hp.query_disc(nside, vec, self._data_radius_radians, inclusive=False, nest=nest)
+        pixels_inside_cone = hp.query_disc(
+            nside, vec, self._data_radius_radians, inclusive=False, nest=nest
+        )
 
         return pixels_inside_cone
 
@@ -129,13 +146,16 @@ def get_flat_sky_projection(self, pixel_size_deg):
 
         # Decide side for image
 
         # Compute number of pixels, making sure it is going to be even (by approximating up)
-        npix_per_side = 2 * int(np.ceil(old_div(np.rad2deg(self._model_radius_radians), pixel_size_deg)))
+        npix_per_side = 2 * int(
+            np.ceil(old_div(np.rad2deg(self._model_radius_radians), pixel_size_deg))
+        )
 
         # Get lon, lat of center
         ra, dec = self._get_ra_dec()
 
         # This gets a list of all RA, Decs for an AIT-projected image of npix_per_side x npix_per_side
-        flat_sky_proj = FlatSkyProjection(ra, dec, pixel_size_deg, npix_per_side, npix_per_side)
+        flat_sky_proj = FlatSkyProjection(
+            ra, dec, pixel_size_deg, npix_per_side, npix_per_side
+        )
 
         return flat_sky_proj
-
diff --git a/hawc_hal/region_of_interest/healpix_map_roi.py b/hawc_hal/region_of_interest/healpix_map_roi.py
index d579683..fcdf2c1 100644
--- a/hawc_hal/region_of_interest/healpix_map_roi.py
+++ b/hawc_hal/region_of_interest/healpix_map_roi.py
@@ -5,6 +5,7 @@
 import healpy as hp
 
 from threeML.io.logging import setup_logger
+
 log = setup_logger(__name__)
 log.propagate = False
 from astromodels.core.sky_direction import SkyDirection
@@ -15,8 +16,16 @@
 
 
 class HealpixMapROI(HealpixROIBase):
-
-    def __init__(self, data_radius, model_radius, roimap=None, roifile=None, threshold=0.5, *args, **kwargs):
+    def __init__(
+        self,
+        data_radius,
+        model_radius,
+        roimap=None,
+        roifile=None,
+        threshold=0.5,
+        *args,
+        **kwargs
+    ):
         """
         A cone Region of Interest defined by a healpix map (can be read from a fits file).
         User needs to supply a cone region (center and radius) defining the plane projection for the model map.
@@ -43,8 +52,10 @@ def __init__(self, data_radius, model_radius, roimap=None, roifile=None, thresho :param args: arguments for the SkyDirection class of astromodels :param kwargs: keywords for the SkyDirection class of astromodels """ - - assert roifile is not None or roimap is not None, "Must supply either healpix map or fitsfile to create HealpixMapROI" + + assert ( + roifile is not None or roimap is not None + ), "Must supply either healpix map or fitsfile to create HealpixMapROI" self._center = SkyDirection(*args, **kwargs) @@ -56,26 +67,25 @@ def __init__(self, data_radius, model_radius, roimap=None, roifile=None, thresho self.read_map(roimap=roimap, roifile=roifile) - def read_map(self, roimap=None, roifile=None): - assert roifile is not None or roimap is not None, \ - "Must supply either healpix map or fits file" - + assert ( + roifile is not None or roimap is not None + ), "Must supply either healpix map or fits file" + if roimap is not None: roimap = roimap self._filename = None elif roifile is not None: self._filename = roifile - roimap = hp.fitsfunc.read_map(self._filename) + roimap = hp.fitsfunc.read_map(self._filename) self._roimaps = {} self._original_nside = hp.npix2nside(roimap.shape[0]) self._roimaps[self._original_nside] = roimap - - self.check_roi_inside_model() + self.check_roi_inside_model() def check_roi_inside_model(self): @@ -83,44 +93,64 @@ def check_roi_inside_model(self): radius = np.rad2deg(self._model_radius_radians) ra, dec = self.ra_dec_center - temp_roi = HealpixConeROI(data_radius = radius , model_radius=radius, ra=ra, dec=dec) + temp_roi = HealpixConeROI( + data_radius=radius, model_radius=radius, ra=ra, dec=dec + ) - model_pixels = temp_roi.active_pixels( self._original_nside ) + model_pixels = temp_roi.active_pixels(self._original_nside) if not all(p in model_pixels for p in active_pixels): - log.warning("Some pixels inside your ROI are not contained in the model map.") + log.warning( + "Some pixels inside your ROI are not contained in the model map." 
+ ) def to_dict(self): ra, dec = self.ra_dec_center - s = {'ROI type': type(self).__name__.split(".")[-1], - 'ra': ra, - 'dec': dec, - 'model_radius_deg': np.rad2deg(self._model_radius_radians), - 'data_radius_deg': np.rad2deg(self._data_radius_radians), - 'roimap': self._roimaps[self._original_nside], - 'threshold': self._threshold, - 'roifile': self._filename } + s = { + "ROI type": type(self).__name__.split(".")[-1], + "ra": ra, + "dec": dec, + "model_radius_deg": np.rad2deg(self._model_radius_radians), + "data_radius_deg": np.rad2deg(self._data_radius_radians), + "roimap": self._roimaps[self._original_nside], + "threshold": self._threshold, + "roifile": self._filename, + } return s @classmethod def from_dict(cls, data): - - return cls(data['data_radius_deg'], data['model_radius_deg'], threshold=data['threshold'], - roimap=data['roimap'], ra=data['ra'], - dec=data['dec'], roifile=data['roifile']) - def __str__(self): + return cls( + data["data_radius_deg"], + data["model_radius_deg"], + threshold=data["threshold"], + roimap=data["roimap"], + ra=data["ra"], + dec=data["dec"], + roifile=data["roifile"], + ) - s = ("%s: Center (R.A., Dec) = (%.3f, %.3f), model radius: %.3f deg, display radius: %.3f deg, threshold = %.2f" % - (type(self).__name__, self.ra_dec_center[0], self.ra_dec_center[1], - self.model_radius.to(u.deg).value, self.data_radius.to(u.deg).value, self._threshold)) + def __str__(self): - if self._filename is not None: + s = ( + "%s: Center (R.A., Dec) = (%.3f, %.3f), model radius: %.3f deg, display radius: %.3f deg, threshold = %.2f" + % ( + type(self).__name__, + self.ra_dec_center[0], + self.ra_dec_center[1], + self.model_radius.to(u.deg).value, + self.data_radius.to(u.deg).value, + self._threshold, + ) + ) + + if self._filename is not None: s = "%s, data ROI from %s" % (s, self._filename) - + return s def display(self): @@ -153,7 +183,9 @@ def _get_ra_dec(self): def _active_pixels(self, nside, ordering): if not nside in self._roimaps: - self._roimaps[nside] = hp.ud_grade(self._roimaps[self._original_nside], nside_out=nside) + self._roimaps[nside] = hp.ud_grade( + self._roimaps[self._original_nside], nside_out=nside + ) pixels_inside_roi = np.where(self._roimaps[nside] >= self._threshold)[0] @@ -164,13 +196,16 @@ def get_flat_sky_projection(self, pixel_size_deg): # Decide side for image # Compute number of pixels, making sure it is going to be even (by approximating up) - npix_per_side = 2 * int(np.ceil(old_div(np.rad2deg(self._model_radius_radians), pixel_size_deg))) + npix_per_side = 2 * int( + np.ceil(old_div(np.rad2deg(self._model_radius_radians), pixel_size_deg)) + ) # Get lon, lat of center ra, dec = self._get_ra_dec() # This gets a list of all RA, Decs for an AIT-projected image of npix_per_size x npix_per_side - flat_sky_proj = FlatSkyProjection(ra, dec, pixel_size_deg, npix_per_side, npix_per_side) + flat_sky_proj = FlatSkyProjection( + ra, dec, pixel_size_deg, npix_per_side, npix_per_side + ) return flat_sky_proj - diff --git a/hawc_hal/region_of_interest/healpix_roi_base.py b/hawc_hal/region_of_interest/healpix_roi_base.py index 71ab65c..cf6321a 100644 --- a/hawc_hal/region_of_interest/healpix_roi_base.py +++ b/hawc_hal/region_of_interest/healpix_roi_base.py @@ -1,13 +1,13 @@ from builtins import object -_EQUATORIAL = 'equatorial' -_GALACTIC = 'galactic' -_RING = 'RING' -_NESTED = 'NESTED' +_EQUATORIAL = "equatorial" +_GALACTIC = "galactic" +_RING = "RING" +_NESTED = "NESTED" -class HealpixROIBase(object): +class HealpixROIBase(object): def active_pixels(self, 
nside, system=_EQUATORIAL, ordering=_RING): """ Returns the non-zero elements, i.e., the pixels selected according to this Region Of Interest @@ -23,9 +23,14 @@ def active_pixels(self, nside, system=_EQUATORIAL, ordering=_RING): assert system == _EQUATORIAL, "%s reference system not supported" % system - assert ordering in [_RING, _NESTED], "Could not understand ordering %s. Must be %s or %s" % (ordering, - _RING, - _NESTED) + assert ordering in [ + _RING, + _NESTED, + ], "Could not understand ordering %s. Must be %s or %s" % ( + ordering, + _RING, + _NESTED, + ) return self._active_pixels(nside, ordering) diff --git a/hawc_hal/response/__init__.py b/hawc_hal/response/__init__.py index cb4a892..698350e 100644 --- a/hawc_hal/response/__init__.py +++ b/hawc_hal/response/__init__.py @@ -1,2 +1,2 @@ from __future__ import absolute_import -from .response import hawc_response_factory \ No newline at end of file +from .response import hawc_response_factory diff --git a/hawc_hal/response/response_bin.py b/hawc_hal/response/response_bin.py index 06297f6..057e7da 100644 --- a/hawc_hal/response/response_bin.py +++ b/hawc_hal/response/response_bin.py @@ -112,15 +112,20 @@ def log_log_spectrum(log_energy: float, parameters: np.ndarray): return ( np.log10(parameters[0]) - parameters[1] * log_energy - - np.log10(np.exp(1.0)) * np.power(10.0, log_energy - np.log10(parameters[2])) + - np.log10(np.exp(1.0)) + * np.power(10.0, log_energy - np.log10(parameters[2])) ) else: raise ValueError("Unknown spectral shape.") - this_en_sig_th1d = cls._get_en_th1d(root_file, dec_id, analysis_bin_id, prefix="Sig") + this_en_sig_th1d = cls._get_en_th1d( + root_file, dec_id, analysis_bin_id, prefix="Sig" + ) - this_en_bg_th1d = cls._get_en_th1d(root_file, dec_id, analysis_bin_id, prefix="Bg") + this_en_bg_th1d = cls._get_en_th1d( + root_file, dec_id, analysis_bin_id, prefix="Bg" + ) total_bins = this_en_sig_th1d.shape[0] sim_energy_bin_low = np.zeros(total_bins) @@ -163,11 +168,15 @@ def log_log_spectrum(log_energy: float, parameters: np.ndarray): try: psf_prefix = f"dec_{dec_id:02d}/nh_{analysis_bin_id}" - psf_tf1_metadata = root_file[f"{psf_prefix}/PSF_dec{dec_id}_nh{analysis_bin_id}_fit"] + psf_tf1_metadata = root_file[ + f"{psf_prefix}/PSF_dec{dec_id}_nh{analysis_bin_id}_fit" + ] except uproot.KeyInFileError: psf_prefix = f"dec_{dec_id:02d}/nh_0{analysis_bin_id}" - psf_tf1_metadata = root_file[f"{psf_prefix}/PSF_dec{dec_id}_nh{analysis_bin_id}_fit"] + psf_tf1_metadata = root_file[ + f"{psf_prefix}/PSF_dec{dec_id}_nh{analysis_bin_id}_fit" + ] psf_tf1_fparams = psf_tf1_metadata.member("fParams") @@ -241,7 +250,9 @@ def combine_with_weights(self, other_response_bin, dec_center, w1, w2): n_sim_signal_events = ( w1 * self._sim_n_sig_events + w2 * other_response_bin._sim_n_sig_events ) - n_sim_bkg_events = w1 * self._sim_n_bg_events + w2 * other_response_bin._sim_n_bg_events + n_sim_bkg_events = ( + w1 * self._sim_n_bg_events + w2 * other_response_bin._sim_n_bg_events + ) # We assume that the bin centers are the same assert np.allclose( diff --git a/hawc_hal/serialize.py b/hawc_hal/serialize.py index fa1b196..f85cb58 100644 --- a/hawc_hal/serialize.py +++ b/hawc_hal/serialize.py @@ -5,8 +5,7 @@ # This object is to decouple the serialization from any particular implementation. At the moment we use HDF5 through # pandas but this might change in the future. Without changing the external API, only changes here will be necessary. 
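+#
+# A minimal usage sketch (the file name, key, and metadata below are
+# hypothetical, mirroring hawc_hal/tests/test_serialization.py):
+#
+#     with Serialization("example.hd5", mode="w") as store:
+#         store.store_pandas_object("my_object", my_pandas_series, meta1=1.0)
+#
+#     with Serialization("example.hd5", mode="r") as store:
+#         my_copy, metadata = store.retrieve_pandas_object("my_object")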
class Serialization(object): - - def __init__(self, filename, mode='r', compress=True): + def __init__(self, filename, mode="r", compress=True): self._filename = filename self._compress = compress @@ -16,7 +15,9 @@ def __enter__(self): if self._compress: - self._store = HDFStore(self._filename, complib='blosc:lz4', complevel=9, mode=self._mode) + self._store = HDFStore( + self._filename, complib="blosc:lz4", complevel=9, mode=self._mode + ) else: # pragma: no cover @@ -35,7 +36,7 @@ def keys(self): def store_pandas_object(self, path, obj, **metadata): - self._store.put(path, obj, format='fixed') + self._store.put(path, obj, format="fixed") self._store.get_storer(path).attrs.metadata = metadata diff --git a/hawc_hal/sphere_dist.py b/hawc_hal/sphere_dist.py index ec4ca6c..db0fd24 100644 --- a/hawc_hal/sphere_dist.py +++ b/hawc_hal/sphere_dist.py @@ -28,7 +28,7 @@ def sphere_dist(ra1, dec1, ra2, dec2): # pragma: no cover dlon = lon2 - lon1 dlat = lat2 - lat1 - a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon /2.0)**2 + a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0) ** 2 c = 2 * np.arcsin(np.sqrt(a)) return c * rad2deg diff --git a/hawc_hal/tests/conftest.py b/hawc_hal/tests/conftest.py index f3706a0..5ece37c 100644 --- a/hawc_hal/tests/conftest.py +++ b/hawc_hal/tests/conftest.py @@ -77,8 +77,12 @@ def check_map_trees(m1, m2): p1 = m1[p_key] p2 = m2[p_key] - assert np.allclose(p1.observation_map.as_partial(), p2.observation_map.as_partial()) - assert np.allclose(p1.background_map.as_partial(), p2.background_map.as_partial()) + assert np.allclose( + p1.observation_map.as_partial(), p2.observation_map.as_partial() + ) + assert np.allclose( + p1.background_map.as_partial(), p2.background_map.as_partial() + ) assert p1.nside == p2.nside assert p1.n_transits == p2.n_transits @@ -128,7 +132,9 @@ def check_responses(r1, r2): rb1.sim_differential_photon_fluxes, rb2.sim_differential_photon_fluxes ) - assert np.allclose(rb1.sim_signal_events_per_bin, rb2.sim_signal_events_per_bin) + assert np.allclose( + rb1.sim_signal_events_per_bin, rb2.sim_signal_events_per_bin + ) # Test PSF assert np.allclose(rb1.psf.xs, rb2.psf.xs) diff --git a/hawc_hal/tests/test_bilinar_interpolation.py b/hawc_hal/tests/test_bilinar_interpolation.py index e795588..d7e11e1 100644 --- a/hawc_hal/tests/test_bilinar_interpolation.py +++ b/hawc_hal/tests/test_bilinar_interpolation.py @@ -15,11 +15,15 @@ def test_fast_bilinear_interpolation(): new_y = np.random.uniform(min(gridy), max(gridy), 500) new_coords = np.asarray(cartesian([new_x, new_y])) - mfi = fast_bilinar_interpolation.FastBilinearInterpolation(data.shape, (new_coords[:, 0], new_coords[:, 1])) + mfi = fast_bilinar_interpolation.FastBilinearInterpolation( + data.shape, (new_coords[:, 0], new_coords[:, 1]) + ) v1 = mfi(data) # Check against the slower scipy interpolator in map_coordinates - v2 = scipy.ndimage.map_coordinates(data, np.array((new_coords[:, 0], new_coords[:, 1])), order=1) + v2 = scipy.ndimage.map_coordinates( + data, np.array((new_coords[:, 0], new_coords[:, 1])), order=1 + ) assert np.allclose(v1, v2) diff --git a/hawc_hal/tests/test_complete_analysis.py b/hawc_hal/tests/test_complete_analysis.py index 3c66558..d775330 100644 --- a/hawc_hal/tests/test_complete_analysis.py +++ b/hawc_hal/tests/test_complete_analysis.py @@ -6,15 +6,12 @@ from conftest import point_source_model -@pytest.fixture(scope='module') +@pytest.fixture(scope="module") def test_fit(roi, maptree, response, point_source_model): pts_model 
= point_source_model - hawc = HAL("HAWC", - maptree, - response, - roi) + hawc = HAL("HAWC", maptree, response, roi) # Use from bin 1 to bin 9 hawc.set_active_measurements(1, 9) @@ -37,7 +34,7 @@ def test_fit(roi, maptree, response, point_source_model): def test_simulation(test_fit): jl, hawc, pts_model, param_df, like_df, data = test_fit - + sim = hawc.get_simulated_dataset("HAWCsim") sim.write("sim_resp.hd5", "sim_maptree.hd5") @@ -81,21 +78,28 @@ def test_goodness(test_fit): gf = GoodnessOfFit(jl) gof, param, likes = gf.by_mc(10) - print("Prob. of obtaining -log(like) >= observed by chance if null hypothesis is true: %.2f" % gof['HAWC']) + print( + "Prob. of obtaining -log(like) >= observed by chance if null hypothesis is true: %.2f" + % gof["HAWC"] + ) # it is a good idea to inspect the results of the simulations with some plots # Histogram of likelihood values fig, sub = plt.subplots() likes.hist(ax=sub) # Overplot a vertical dashed line on the observed value - plt.axvline(jl.results.get_statistic_frame().loc['HAWC', '-log(likelihood)'], - color='black', - linestyle='--') + plt.axvline( + jl.results.get_statistic_frame().loc["HAWC", "-log(likelihood)"], + color="black", + linestyle="--", + ) fig.savefig("hal_sim_all_likes.png") # Plot the value of beta for all simulations (for example) fig, sub = plt.subplots() - param.loc[(slice(None), ['pts.spectrum.main.Cutoff_powerlaw.index']), 'value'].plot() + param.loc[ + (slice(None), ["pts.spectrum.main.Cutoff_powerlaw.index"]), "value" + ].plot() fig.savefig("hal_sim_all_index.png") @@ -103,7 +107,7 @@ def test_fit_with_free_position(test_fit): jl, hawc, pts_model, param_df, like_df, data = test_fit - hawc.psf_integration_method = 'fast' + hawc.psf_integration_method = "fast" # Free the position of the source pts_model.pts.position.ra.free = True @@ -126,17 +130,26 @@ def test_fit_with_free_position(test_fit): # pts.position.dec(2.214 + / - 0.00025) x # 10 - a, b, cc, fig = jl.get_contours(pts_model.pts.position.dec, 22.13, 22.1525, 5, - pts_model.pts.position.ra, 83.615, 83.635, 5) - - plt.plot([ra], [dec], 'x') + a, b, cc, fig = jl.get_contours( + pts_model.pts.position.dec, + 22.13, + 22.1525, + 5, + pts_model.pts.position.ra, + 83.615, + 83.635, + 5, + ) + + plt.plot([ra], [dec], "x") fig.savefig("hal_src_localization.png") - hawc.psf_integration_method = 'exact' + hawc.psf_integration_method = "exact" pts_model.pts.position.ra.free = False pts_model.pts.position.dec.free = False + def test_bayesian_analysis(test_fit): jl, hawc, pts_model, param_df, like_df, data = test_fit @@ -168,4 +181,3 @@ def test_bayesian_analysis(test_fit): fig = bs.results.corner_plot() fig.savefig("hal_corner_plot.png") - diff --git a/hawc_hal/tests/test_copy.py b/hawc_hal/tests/test_copy.py index c42cb16..fe98933 100644 --- a/hawc_hal/tests/test_copy.py +++ b/hawc_hal/tests/test_copy.py @@ -7,14 +7,15 @@ from conftest import maptree, response + def deepcopy_hal(theMaptree, theResponse, extended=False): src_ra, src_dec = 82.628, 22.640 - src_name = 'test_source' + src_name = "test_source" - roi = HealpixConeROI(data_radius=5., model_radius=8., ra=src_ra, dec=src_dec) + roi = HealpixConeROI(data_radius=5.0, model_radius=8.0, ra=src_ra, dec=src_dec) - hawc = HAL('HAWC', theMaptree, theResponse, roi) + hawc = HAL("HAWC", theMaptree, theResponse, roi) hawc.set_active_measurements(1, 9) data = DataList(hawc) @@ -32,9 +33,11 @@ def deepcopy_hal(theMaptree, theResponse, extended=False): hawc_copy = copy.deepcopy(hawc) + def test_deepcopy_point_source(maptree, 
response): deepcopy_hal(maptree, response, extended=False) -#@pytest.mark.xfail + +# @pytest.mark.xfail def test_deepcopy_extended_source(maptree, response): deepcopy_hal(maptree, response, extended=True) diff --git a/hawc_hal/tests/test_geminga_paper.py b/hawc_hal/tests/test_geminga_paper.py index ca6d44b..14feb13 100644 --- a/hawc_hal/tests/test_geminga_paper.py +++ b/hawc_hal/tests/test_geminga_paper.py @@ -4,6 +4,7 @@ try: import ROOT + ROOT.PyConfig.IgnoreCommandLineOptions = True except: pass @@ -13,11 +14,26 @@ from collections import namedtuple import numpy as np -def test_geminga_paper(geminga_maptree, geminga_response): - Args_fake = namedtuple('args', 'mtfile,rsfile,startBin,stopBin,RA,Dec,uratio,delta,ROI,output,plugin') +def test_geminga_paper(geminga_maptree, geminga_response): - args = Args_fake(geminga_maptree, geminga_response, 1, 9, 98.5, 17.76, 1.12, 0.3333, 0, 'output.txt', 'new') + Args_fake = namedtuple( + "args", "mtfile,rsfile,startBin,stopBin,RA,Dec,uratio,delta,ROI,output,plugin" + ) + + args = Args_fake( + geminga_maptree, + geminga_response, + 1, + 9, + 98.5, + 17.76, + 1.12, + 0.3333, + 0, + "output.txt", + "new", + ) param, like_df, TS = go(args) @@ -27,9 +43,11 @@ def test_geminga_paper(geminga_maptree, geminga_response): # B0656.spectrum.main.Powerlaw.K (6.0 -1.3 +1.7) x 10^-24 1 / (cm2 keV s) # B0656.spectrum.main.Powerlaw.index -2.140 +/- 0.17 - assert np.allclose(param.loc[:, 'value'].values, - [5.38278446e+00, 1.40122099e-23, -2.34261469e+00, 5.98438658e-24, -2.13846297e+00], - rtol=5e-2) + assert np.allclose( + param.loc[:, "value"].values, + [5.38278446e00, 1.40122099e-23, -2.34261469e00, 5.98438658e-24, -2.13846297e00], + rtol=5e-2, + ) def go(args): @@ -37,11 +55,9 @@ def go(args): spectrum = Powerlaw() shape = Continuous_injection_diffusion_legacy() - source = ExtendedSource("Geminga", - spatial_shape=shape, - spectral_shape=spectrum) + source = ExtendedSource("Geminga", spatial_shape=shape, spectral_shape=spectrum) - fluxUnit = 1. / (u.TeV * u.cm ** 2 * u.s) + fluxUnit = 1.0 / (u.TeV * u.cm**2 * u.s) # Set spectral parameters spectrum.K = 1e-14 * fluxUnit @@ -51,7 +67,7 @@ def go(args): spectrum.piv.fix = True spectrum.index = -2.4 - spectrum.index.bounds = (-4., -1.) + spectrum.index.bounds = (-4.0, -1.0) # Set spatial parameters shape.lon0 = args.RA * u.degree @@ -62,7 +78,7 @@ def go(args): shape.rdiff0 = 6.0 * u.degree shape.rdiff0.fix = False - shape.rdiff0.max_value = 12. + shape.rdiff0.max_value = 12.0 shape.delta.min_value = 0.1 shape.delta = args.delta @@ -80,11 +96,9 @@ def go(args): spectrum2 = Powerlaw() shape2 = Continuous_injection_diffusion_legacy() - source2 = ExtendedSource("B0656", - spatial_shape=shape2, - spectral_shape=spectrum2) + source2 = ExtendedSource("B0656", spatial_shape=shape2, spectral_shape=spectrum2) - fluxUnit = 1. / (u.TeV * u.cm ** 2 * u.s) + fluxUnit = 1.0 / (u.TeV * u.cm**2 * u.s) # Set spectral parameters for the 2nd source spectrum2.K = 1e-14 * fluxUnit @@ -95,7 +109,7 @@ def go(args): spectrum2.index = -2.2 # spectrum2.index.fix = True - spectrum2.index.bounds = (-4., -1.) + spectrum2.index.bounds = (-4.0, -1.0) # Set spatial parameters for the 2nd source shape2.lon0 = 104.95 * u.degree @@ -106,7 +120,7 @@ def go(args): shape2.rdiff0 = 6.0 * u.degree shape2.rdiff0.fix = False - shape2.rdiff0.max_value = 12. 
+ shape2.rdiff0.max_value = 12.0 shape2.delta.min_value = 0.2 shape2.delta = args.delta @@ -132,7 +146,7 @@ def go(args): ra_c, dec_c, rad = (None, None, None) if args.ROI == 0: - ra_c, dec_c, rad = 101.7, 16, 9. + ra_c, dec_c, rad = 101.7, 16, 9.0 # llh.set_ROI(101.7, 16, 9., True) elif args.ROI == 1: ra_c, dec_c, rad = 98.5, 17.76, 4.5 @@ -141,13 +155,13 @@ def go(args): ra_c, dec_c, rad = 97, 18.5, 6 # llh.set_ROI(97, 18.5, 6, True) elif args.ROI == 3: - ra_c, dec_c, rad = 104.95, 14.24, 3. + ra_c, dec_c, rad = 104.95, 14.24, 3.0 # llh.set_ROI(104.95, 14.24, 3., True) elif args.ROI == 4: ra_c, dec_c, rad = 107, 13, 5.4 # llh.set_ROI(107, 13, 5.4, True) - if args.plugin == 'old': + if args.plugin == "old": llh = HAWCLike("Geminga", args.mtfile, args.rsfile, fullsky=True) llh.set_active_measurements(args.startBin, args.stopBin) @@ -155,25 +169,20 @@ def go(args): else: - roi = HealpixConeROI(data_radius=rad, - model_radius=rad + 10.0, - ra=ra_c, dec=dec_c) - - llh = HAL("HAWC", - args.mtfile, - args.rsfile, - roi) - + roi = HealpixConeROI( + data_radius=rad, model_radius=rad + 10.0, ra=ra_c, dec=dec_c + ) + + llh = HAL("HAWC", args.mtfile, args.rsfile, roi) + llh.set_active_measurements(args.startBin, args.stopBin) print(lm) # we fit a common diffusion coefficient so parameters are linked if "B0656" in lm and "Geminga" in lm: - law = Line(b=250. / 288., a=0.) - lm.link(lm.B0656.spatial_shape.rdiff0, - lm.Geminga.spatial_shape.rdiff0, - law) + law = Line(b=250.0 / 288.0, a=0.0) + lm.link(lm.B0656.spatial_shape.rdiff0, lm.Geminga.spatial_shape.rdiff0, law) lm.B0656.spatial_shape.rdiff0.Line.a.fix = True lm.B0656.spatial_shape.rdiff0.Line.b.fix = True @@ -182,10 +191,10 @@ def go(args): print(lm) # Set up the likelihood and run the fit - TS = 0. + TS = 0.0 try: - + lm.Geminga.spatial_shape.rdiff0 = 5.5 lm.Geminga.spatial_shape.rdiff0.fix = False lm.Geminga.spectrum.main.Powerlaw.K = 1.36e-23 @@ -218,23 +227,23 @@ def go(args): # Print the TS, significance, and fit parameters, and then plot stuff print("\nTest statistic:") - if args.plugin == 'old': + if args.plugin == "old": TS = llh.calc_TS() else: - + if "B0656" in lm and "Geminga" in lm: lm.unlink(lm.B0656.spatial_shape.rdiff0) - + TS_df = jl.compute_TS("Geminga", like_df) - TS = TS_df.loc['HAWC', 'TS'] - + TS = TS_df.loc["HAWC", "TS"] + TS_df2 = jl.compute_TS("B0656", like_df) - TS2 = TS_df2.loc['HAWC', 'TS'] - - print (TS_df) - print (TS_df2) + TS2 = TS_df2.loc["HAWC", "TS"] + + print(TS_df) + print(TS_df2) print("Test statistic: %g" % TS) @@ -264,27 +273,73 @@ def go(args): if __name__ == "__main__": p = argparse.ArgumentParser(description="Example spectral fit with LiFF") - p.add_argument("-m", "--maptreefile", dest="mtfile", - help="LiFF MapTree ROOT file", default="./maptree.root") - p.add_argument("-r", "--responsefile", dest="rsfile", - help="LiFF detector response ROOT file", default="./response.root") - p.add_argument("--startBin", dest="startBin", default=1, type=int, - help="Starting analysis bin [0..9]") - p.add_argument("--stopBin", dest="stopBin", default=9, type=int, - help="Stopping analysis bin [0..9]") - p.add_argument("--RA", default=98.5, type=float, - help="Source RA in degrees (Geminga default)") - p.add_argument("--Dec", default=17.76, type=float, - help="Source Dec in degrees (Geminga default)") - p.add_argument("--uratio", dest="uratio", default=1.12, type=float, - help="the ratio of energy density between CMB and B. 
1.12 means B=3uG and CMB=0.25") - p.add_argument("--delta", dest="delta", default=0.3333, type=float, - help="Diffusion spectral index (0.3 to 0.6)") + p.add_argument( + "-m", + "--maptreefile", + dest="mtfile", + help="LiFF MapTree ROOT file", + default="./maptree.root", + ) + p.add_argument( + "-r", + "--responsefile", + dest="rsfile", + help="LiFF detector response ROOT file", + default="./response.root", + ) + p.add_argument( + "--startBin", + dest="startBin", + default=1, + type=int, + help="Starting analysis bin [0..9]", + ) + p.add_argument( + "--stopBin", + dest="stopBin", + default=9, + type=int, + help="Stopping analysis bin [0..9]", + ) + p.add_argument( + "--RA", default=98.5, type=float, help="Source RA in degrees (Geminga default)" + ) + p.add_argument( + "--Dec", + default=17.76, + type=float, + help="Source Dec in degrees (Geminga default)", + ) + p.add_argument( + "--uratio", + dest="uratio", + default=1.12, + type=float, + help="the ratio of energy density between CMB and B. 1.12 means B=3uG and CMB=0.25", + ) + p.add_argument( + "--delta", + dest="delta", + default=0.3333, + type=float, + help="Diffusion spectral index (0.3 to 0.6)", + ) p.add_argument("--ROI", dest="ROI", default=0, type=int) - p.add_argument("-o", "--output", dest="output", default="output.txt", - help="Parameter output file.") - p.add_argument("--plugin", dest='plugin', default='old', type=str, - help="Old or new", choices=['new', 'old']) + p.add_argument( + "-o", + "--output", + dest="output", + default="output.txt", + help="Parameter output file.", + ) + p.add_argument( + "--plugin", + dest="plugin", + default="old", + type=str, + help="Old or new", + choices=["new", "old"], + ) args = p.parse_args() diff --git a/hawc_hal/tests/test_healpixRoi.py b/hawc_hal/tests/test_healpixRoi.py index 4cda5da..c8682a5 100644 --- a/hawc_hal/tests/test_healpixRoi.py +++ b/hawc_hal/tests/test_healpixRoi.py @@ -12,86 +12,91 @@ from conftest import check_map_trees -def Sky2Vec( ra, dec ): - c = SkyCoord( frame = "icrs", ra=ra*u.degree, dec=dec*u.degree ) - theta = (90.0*u.degree-c.dec).to(u.radian).value - phi = c.ra.to(u.radian).value - vec = hp.pixelfunc.ang2vec(theta, phi) - return vec - -NSIDE=512 - -def test_healpixRoi(geminga_maptree,geminga_response): - - #test to make sure writing a model with HealpixMapROI works fine - ra, dec = 101.7, 16. - data_radius = 9. - model_radius = 24. 
-
-    m = np.zeros(hp.nside2npix(NSIDE))
-    vec = Sky2Vec(ra, dec)
-    m[hp.query_disc(NSIDE, vec, (data_radius*u.degree).to(u.radian).value, inclusive=False)] = 1
-
-    #hp.fitsfunc.write_map("roitemp.fits" , m, nest=False, coord="C", partial=False, overwrite=True )
-
-    map_roi = HealpixMapROI(data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roimap=m)
-    #fits_roi = HealpixMapROI(data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roifile="roitemp.fits")
-    hawc = HAL("HAWC",geminga_maptree,geminga_response,map_roi)
-    hawc.set_active_measurements(1,9)
-
-    '''
+def Sky2Vec(ra, dec):
+    c = SkyCoord(frame="icrs", ra=ra * u.degree, dec=dec * u.degree)
+    theta = (90.0 * u.degree - c.dec).to(u.radian).value
+    phi = c.ra.to(u.radian).value
+    vec = hp.pixelfunc.ang2vec(theta, phi)
+    return vec
+
+
+NSIDE = 512
+
+
+def test_healpixRoi(geminga_maptree, geminga_response):
+
+    # test to make sure writing a model with HealpixMapROI works fine
+    ra, dec = 101.7, 16.0
+    data_radius = 9.0
+    model_radius = 24.0
+
+    m = np.zeros(hp.nside2npix(NSIDE))
+    vec = Sky2Vec(ra, dec)
+    m[
+        hp.query_disc(
+            NSIDE, vec, (data_radius * u.degree).to(u.radian).value, inclusive=False
+        )
+    ] = 1
+
+    # hp.fitsfunc.write_map("roitemp.fits" , m, nest=False, coord="C", partial=False, overwrite=True )
+
+    map_roi = HealpixMapROI(
+        data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roimap=m
+    )
+    # fits_roi = HealpixMapROI(data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roifile="roitemp.fits")
+    hawc = HAL("HAWC", geminga_maptree, geminga_response, map_roi)
+    hawc.set_active_measurements(1, 9)
+
+    """
     Define model:
 
     Two sources, 1 point, 1 extended
 
     Same declination, but offset in RA
 
     Different spectral index, but both power laws
 
-    '''
-    pt_shift=3.0
-    ext_shift=2.0
-
-    # First soource
-    spectrum1 = Powerlaw()
-    source1 = PointSource("point", ra=ra+pt_shift,dec=dec,spectral_shape=spectrum1)
-
-    spectrum1.K = 1e-12 / (u.TeV * u.cm **2 * u.s)
-    spectrum1.piv = 1* u.TeV
-    spectrum1.index = -2.3
-
-    spectrum1.piv.fix = True
-    spectrum1.K.fix = True
-    spectrum1.index.fix = True
-
-    # Second source
-    shape = Gaussian_on_sphere(lon0=ra - ext_shift,lat0=dec,sigma=0.3)
-    spectrum2 = Powerlaw()
-    source2 = ExtendedSource("extended",spatial_shape=shape,spectral_shape=spectrum2)
-
-    spectrum2.K = 1e-12 / (u.TeV * u.cm **2 * u.s)
-    spectrum2.piv = 1* u.TeV
-    spectrum2.index = -2.0
-
-    spectrum2.piv.fix = True
-    spectrum2.K.fix = True
-    spectrum2.index.fix = True
-
-    shape.lon0.fix = True
-    shape.lat0.fix = True
-    shape.sigma.fix = True
-
-    model = Model(source1,source2)
-
-    hawc.set_model(model)
-
-    # Write the model map
-    model_map_tree=hawc.write_model_map("test.hd5",test_return_map=True)
-
-    # Read the model back
-    hawc_model = map_tree_factory('test.hd5',map_roi)
-
-    # Check written model and read model are the same
-    check_map_trees(hawc_model,model_map_tree)
-
-
-    os.remove( "test.hd5" )
-
-
+    """
+    pt_shift = 3.0
+    ext_shift = 2.0
+
+    # First source
+    spectrum1 = Powerlaw()
+    source1 = PointSource("point", ra=ra + pt_shift, dec=dec, spectral_shape=spectrum1)
+
+    spectrum1.K = 1e-12 / (u.TeV * u.cm**2 * u.s)
+    spectrum1.piv = 1 * u.TeV
+    spectrum1.index = -2.3
+
+    spectrum1.piv.fix = True
+    spectrum1.K.fix = True
+    spectrum1.index.fix = True
+
+    # Second source
+    shape = Gaussian_on_sphere(lon0=ra - ext_shift, lat0=dec, sigma=0.3)
+    spectrum2 = Powerlaw()
+    source2 = ExtendedSource("extended", spatial_shape=shape, spectral_shape=spectrum2)
+
+    spectrum2.K = 1e-12 / 
(u.TeV * u.cm**2 * u.s) + spectrum2.piv = 1 * u.TeV + spectrum2.index = -2.0 + + spectrum2.piv.fix = True + spectrum2.K.fix = True + spectrum2.index.fix = True + + shape.lon0.fix = True + shape.lat0.fix = True + shape.sigma.fix = True + + model = Model(source1, source2) + + hawc.set_model(model) + + # Write the model map + model_map_tree = hawc.write_model_map("test.hd5", test_return_map=True) + + # Read the model back + hawc_model = map_tree_factory("test.hd5", map_roi) + + # Check written model and read model are the same + check_map_trees(hawc_model, model_map_tree) + + os.remove("test.hd5") diff --git a/hawc_hal/tests/test_maptree.py b/hawc_hal/tests/test_maptree.py index 4e60f04..4e7ccb3 100644 --- a/hawc_hal/tests/test_maptree.py +++ b/hawc_hal/tests/test_maptree.py @@ -5,8 +5,7 @@ from conftest import check_map_trees -def test_constructor(maptree, - roi): +def test_constructor(maptree, roi): roi_ = roi @@ -26,8 +25,8 @@ def test_constructor(maptree, # Check corner cases # This should issue a warning because we saved the maptree with a ROI and we try to use # without one - - #with pytest.warns(UserWarning): + + # with pytest.warns(UserWarning): _ = map_tree_factory(test_filename, None) # Now try to load with a different ROI than the one used for the file @@ -39,10 +38,12 @@ def test_constructor(maptree, _ = map_tree_factory(test_filename, oroi) # This instead should work because the ROI is different, but contained in the ROI of the file - smaller_roi = HealpixConeROI(data_radius=roi.data_radius / 2.0, - model_radius=roi.model_radius, - ra=ra_c - 0.2, - dec=dec_c + 0.15) + smaller_roi = HealpixConeROI( + data_radius=roi.data_radius / 2.0, + model_radius=roi.model_radius, + ra=ra_c - 0.2, + dec=dec_c + 0.15, + ) _ = map_tree_factory(test_filename, smaller_roi) diff --git a/hawc_hal/tests/test_model_residual_maps.py b/hawc_hal/tests/test_model_residual_maps.py index b0df547..8d5067f 100644 --- a/hawc_hal/tests/test_model_residual_maps.py +++ b/hawc_hal/tests/test_model_residual_maps.py @@ -21,15 +21,16 @@ not has_root, reason="No ROOT available" ) + @skip_if_ROOT_is_not_available def test_model_residual_maps(geminga_maptree, geminga_response, geminga_roi): - #data_radius = 5.0 - #model_radius = 7.0 + # data_radius = 5.0 + # model_radius = 7.0 output = dirname(geminga_maptree) ra_src, dec_src = 101.7, 16.0 - maptree, response, roi = geminga_maptree, geminga_response, geminga_roi + maptree, response, roi = geminga_maptree, geminga_response, geminga_roi hawc = HAL("HAWC", maptree, response, roi) @@ -39,22 +40,24 @@ def test_model_residual_maps(geminga_maptree, geminga_response, geminga_roi): # Display information about the data loaded and the ROI hawc.display() - ''' + """ Define model: Two sources, 1 point, 1 extended Same declination, but offset in RA Different spectral index, but both power laws - ''' - pt_shift=3.0 + """ + pt_shift = 3.0 ext_shift = 2.0 # First source spectrum1 = Powerlaw() - source1 = PointSource("point", ra=ra_src + pt_shift, dec=dec_src, spectral_shape=spectrum1) + source1 = PointSource( + "point", ra=ra_src + pt_shift, dec=dec_src, spectral_shape=spectrum1 + ) - spectrum1.K = 1e-12 / (u.TeV * u.cm ** 2 * u.s) + spectrum1.K = 1e-12 / (u.TeV * u.cm**2 * u.s) spectrum1.piv = 1 * u.TeV spectrum1.index = -2.3 @@ -67,13 +70,13 @@ def test_model_residual_maps(geminga_maptree, geminga_response, geminga_roi): spectrum2 = Powerlaw() source2 = ExtendedSource("extended", spatial_shape=shape, spectral_shape=spectrum2) - spectrum2.K = 1e-12 / (u.TeV * u.cm ** 2 * u.s) + 
spectrum2.K = 1e-12 / (u.TeV * u.cm**2 * u.s) spectrum2.piv = 1 * u.TeV - spectrum2.index = -2.0 + spectrum2.index = -2.0 - shape.lon0.fix=True - shape.lat0.fix=True - shape.sigma.fix=True + shape.lon0.fix = True + shape.lat0.fix = True + shape.sigma.fix = True spectrum2.piv.fix = True spectrum2.K.fix = True @@ -88,7 +91,7 @@ def test_model_residual_maps(geminga_maptree, geminga_response, geminga_roi): # Define the JointLikelihood object (glue the data to the model) jl = JointLikelihood(model, data, verbose=False) - # This has the effect of loading the model cache + # This has the effect of loading the model cache fig = hawc.display_spectrum() # the test file names @@ -96,13 +99,16 @@ def test_model_residual_maps(geminga_maptree, geminga_response, geminga_roi): residual_file_name = "{0}/test_residual.hdf5".format(output) # Write the map trees for testing - model_map_tree = hawc.write_model_map(model_file_name, poisson_fluctuate=True, test_return_map=True) - residual_map_tree = hawc.write_residual_map(residual_file_name, test_return_map=True) + model_map_tree = hawc.write_model_map( + model_file_name, poisson_fluctuate=True, test_return_map=True + ) + residual_map_tree = hawc.write_residual_map( + residual_file_name, test_return_map=True + ) # Read the maps back in - hawc_model = map_tree_factory(model_file_name,roi) - hawc_residual = map_tree_factory(residual_file_name,roi) - + hawc_model = map_tree_factory(model_file_name, roi) + hawc_residual = map_tree_factory(residual_file_name, roi) check_map_trees(hawc_model, model_map_tree) check_map_trees(hawc_residual, residual_map_tree) diff --git a/hawc_hal/tests/test_n_transits.py b/hawc_hal/tests/test_n_transits.py index 7a26400..ff82806 100644 --- a/hawc_hal/tests/test_n_transits.py +++ b/hawc_hal/tests/test_n_transits.py @@ -19,7 +19,7 @@ def test_transits(maptree, roi): # Case 1: specify number of transits n_transits = 777.7 - maptree_ntransits = map_tree_factory(maptree,roi_,n_transits) + maptree_ntransits = map_tree_factory(maptree, roi_, n_transits) # does the maptree return the specified transits? 
check_n_transits(maptree_ntransits, n_transits) @@ -29,5 +29,3 @@ def test_transits(maptree, roi): n_transits = maptree_unspecifed.n_transits check_n_transits(maptree_unspecifed, n_transits) - - \ No newline at end of file diff --git a/hawc_hal/tests/test_on_point_source.py b/hawc_hal/tests/test_on_point_source.py index 7cff27d..b3b08ee 100644 --- a/hawc_hal/tests/test_on_point_source.py +++ b/hawc_hal/tests/test_on_point_source.py @@ -3,17 +3,9 @@ import argparse -def test_on_point_source(roi, - maptree, - response, - point_source_model, - liff=False): +def test_on_point_source(roi, maptree, response, point_source_model, liff=False): - fit_point_source(roi, - maptree, - response, - point_source_model, - liff=liff) + fit_point_source(roi, maptree, response, point_source_model, liff=liff) if __name__ == "__main__": @@ -30,23 +22,40 @@ def test_on_point_source(roi, def_ra_roi, def_dec_roi = def_roi.ra_dec_center parser = argparse.ArgumentParser() - parser.add_argument("--liff", help="Use LIFF instead of HAL (for benchmarking)", action="store_true") + parser.add_argument( + "--liff", help="Use LIFF instead of HAL (for benchmarking)", action="store_true" + ) parser.add_argument("--ra", help="RA of source", type=float, default=def_ra) parser.add_argument("--dec", help="Dec of source", type=float, default=def_dec) - parser.add_argument("--ra_roi", help="RA of center of ROI", type=float, default=def_ra_roi) - parser.add_argument("--dec_roi", help="Dec of center of ROI", type=float, default=def_dec_roi) - parser.add_argument("--data_radius", help="Radius of the data ROI", type=float, default=def_drad) - parser.add_argument("--model_radius", help="Radius of the model ROI", type=float, default=def_mrad) + parser.add_argument( + "--ra_roi", help="RA of center of ROI", type=float, default=def_ra_roi + ) + parser.add_argument( + "--dec_roi", help="Dec of center of ROI", type=float, default=def_dec_roi + ) + parser.add_argument( + "--data_radius", help="Radius of the data ROI", type=float, default=def_drad + ) + parser.add_argument( + "--model_radius", help="Radius of the model ROI", type=float, default=def_mrad + ) parser.add_argument("--maptree", help="Maptree", type=str, default=maptree()) parser.add_argument("--response", help="Response", type=str, default=response()) - parser.add_argument("--free_position", help='Use this to set the position free', action='store_true', default=False) + parser.add_argument( + "--free_position", + help="Use this to set the position free", + action="store_true", + default=False, + ) args = parser.parse_args() - roi = HealpixConeROI(data_radius=args.data_radius, - model_radius=args.model_radius, - ra=args.ra_roi, - dec=args.dec_roi) + roi = HealpixConeROI( + data_radius=args.data_radius, + model_radius=args.model_radius, + ra=args.ra_roi, + dec=args.dec_roi, + ) pts_model = point_source_model(ra=args.ra, dec=args.dec) @@ -55,8 +64,10 @@ def test_on_point_source(roi, pts_model.pts.position.ra.free = True pts_model.pts.position.dec.free = True - test_on_point_source(roi, - liff=args.liff, - point_source_model=pts_model, - maptree=args.maptree, - response=args.response) + test_on_point_source( + roi, + liff=args.liff, + point_source_model=pts_model, + maptree=args.maptree, + response=args.response, + ) diff --git a/hawc_hal/tests/test_psf.py b/hawc_hal/tests/test_psf.py index 47c63bc..833340e 100644 --- a/hawc_hal/tests/test_psf.py +++ b/hawc_hal/tests/test_psf.py @@ -3,6 +3,7 @@ import pytest from hawc_hal.psf_fast import InvalidPSF, InvalidPSFError + def 
test_invalid_psf(): psf = InvalidPSF() diff --git a/hawc_hal/tests/test_roi.py b/hawc_hal/tests/test_roi.py index 55e918b..d0ed25f 100644 --- a/hawc_hal/tests/test_roi.py +++ b/hawc_hal/tests/test_roi.py @@ -9,53 +9,65 @@ import os -def Sky2Vec( ra, dec ): - c = SkyCoord( frame = "icrs", ra=ra*u.degree, dec=dec*u.degree ) - theta = (90.0*u.degree-c.dec).to(u.radian).value - phi = c.ra.to(u.radian).value - vec = hp.pixelfunc.ang2vec(theta, phi) - return vec +def Sky2Vec(ra, dec): + c = SkyCoord(frame="icrs", ra=ra * u.degree, dec=dec * u.degree) + theta = (90.0 * u.degree - c.dec).to(u.radian).value + phi = c.ra.to(u.radian).value + vec = hp.pixelfunc.ang2vec(theta, phi) + return vec + + +NSIDE = 512 -NSIDE=512 def test_rois(): - #test to make sure ConeROI and MapROI with a simple, circular active region return the same active pixels. - - ra, dec = 100, 30 - data_radius = 10 - model_radius = 15 - - cone_roi = HealpixConeROI(data_radius=data_radius, - model_radius=model_radius, - ra=ra, - dec=dec) - - m = np.zeros(hp.nside2npix(NSIDE)) - vec = Sky2Vec(ra, dec) - m[hp.query_disc(NSIDE, vec, (data_radius*u.degree).to(u.radian).value, inclusive=False)] = 1 - - hp.fitsfunc.write_map("roitemp.fits" , m, nest=False, coord="C", partial=False, overwrite=True ) - - - map_roi = HealpixMapROI(data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roimap=m) - fits_roi = HealpixMapROI(data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roifile="roitemp.fits") - - assert np.all( cone_roi.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) - assert np.all( fits_roi.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) - - os.remove( "roitemp.fits" ) - #test that all is still good after saving the ROIs to dictionaries and restoring. - cone_dict = cone_roi.to_dict() - map_dict = map_roi.to_dict() - fits_dict = fits_roi.to_dict() - - cone_roi2 = get_roi_from_dict( cone_dict ) - map_roi2 = get_roi_from_dict( map_dict ) - fits_roi2 = get_roi_from_dict( fits_dict ) - - assert np.all( cone_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) - assert np.all( fits_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) - assert np.all( map_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) - - + # test to make sure ConeROI and MapROI with a simple, circular active region return the same active pixels. + + ra, dec = 100, 30 + data_radius = 10 + model_radius = 15 + + cone_roi = HealpixConeROI( + data_radius=data_radius, model_radius=model_radius, ra=ra, dec=dec + ) + + m = np.zeros(hp.nside2npix(NSIDE)) + vec = Sky2Vec(ra, dec) + m[ + hp.query_disc( + NSIDE, vec, (data_radius * u.degree).to(u.radian).value, inclusive=False + ) + ] = 1 + + hp.fitsfunc.write_map( + "roitemp.fits", m, nest=False, coord="C", partial=False, overwrite=True + ) + + map_roi = HealpixMapROI( + data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roimap=m + ) + fits_roi = HealpixMapROI( + data_radius=data_radius, + ra=ra, + dec=dec, + model_radius=model_radius, + roifile="roitemp.fits", + ) + + assert np.all(cone_roi.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) + assert np.all(fits_roi.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) + + os.remove("roitemp.fits") + # test that all is still good after saving the ROIs to dictionaries and restoring. 
+ cone_dict = cone_roi.to_dict() + map_dict = map_roi.to_dict() + fits_dict = fits_roi.to_dict() + + cone_roi2 = get_roi_from_dict(cone_dict) + map_roi2 = get_roi_from_dict(map_dict) + fits_roi2 = get_roi_from_dict(fits_dict) + + assert np.all(cone_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) + assert np.all(fits_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) + assert np.all(map_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) diff --git a/hawc_hal/tests/test_root_to_hdf.py b/hawc_hal/tests/test_root_to_hdf.py index 5520e7a..d3d68f9 100644 --- a/hawc_hal/tests/test_root_to_hdf.py +++ b/hawc_hal/tests/test_root_to_hdf.py @@ -15,6 +15,7 @@ not has_root, reason="No ROOT available" ) + @skip_if_ROOT_is_not_available def test_root_to_hdf_response(response): @@ -35,9 +36,8 @@ def test_root_to_hdf_response(response): os.remove(test_filename) -def do_one_test_maptree(geminga_roi, - geminga_maptree, - fullsky=False): + +def do_one_test_maptree(geminga_roi, geminga_maptree, fullsky=False): # Test both with a defined ROI and full sky (ROI is None) if fullsky: @@ -64,16 +64,12 @@ def do_one_test_maptree(geminga_roi, os.remove(test_filename) + @skip_if_ROOT_is_not_available -def test_root_to_hdf_maptree_roi(geminga_roi, - geminga_maptree): - do_one_test_maptree(geminga_roi, - geminga_maptree, - fullsky=False) +def test_root_to_hdf_maptree_roi(geminga_roi, geminga_maptree): + do_one_test_maptree(geminga_roi, geminga_maptree, fullsky=False) + @skip_if_ROOT_is_not_available -def test_root_to_hdf_maptree_full_sky(geminga_roi, - geminga_maptree): - do_one_test_maptree(geminga_roi, - geminga_maptree, - fullsky=True) +def test_root_to_hdf_maptree_full_sky(geminga_roi, geminga_maptree): + do_one_test_maptree(geminga_roi, geminga_maptree, fullsky=True) diff --git a/hawc_hal/tests/test_serialization.py b/hawc_hal/tests/test_serialization.py index fe3d47c..38f04f7 100644 --- a/hawc_hal/tests/test_serialization.py +++ b/hawc_hal/tests/test_serialization.py @@ -8,26 +8,28 @@ def test_serialization(): my_obj = pd.Series(list(range(100))) - my_ob2 = pd.DataFrame.from_dict({'one': np.random.uniform(0, 1, 10), 'two': np.random.uniform(0, 1, 10)}) + my_ob2 = pd.DataFrame.from_dict( + {"one": np.random.uniform(0, 1, 10), "two": np.random.uniform(0, 1, 10)} + ) - meta = {'meta1': 1.0, 'meta2': 2.0} + meta = {"meta1": 1.0, "meta2": 2.0} - test_file = 'test.hd5' + test_file = "test.hd5" - with serialize.Serialization(test_file, mode='w') as store: + with serialize.Serialization(test_file, mode="w") as store: - store.store_pandas_object('test_obj', my_obj, **meta) - store.store_pandas_object('test_df', my_ob2) + store.store_pandas_object("test_obj", my_obj, **meta) + store.store_pandas_object("test_df", my_ob2) assert os.path.exists(test_file) # Now retrieve the object - with serialize.Serialization(test_file, mode='r') as store: + with serialize.Serialization(test_file, mode="r") as store: - copy_obj, copy_meta = store.retrieve_pandas_object('test_obj') - copy_obj2, copy_meta2 = store.retrieve_pandas_object('test_df') + copy_obj, copy_meta = store.retrieve_pandas_object("test_obj") + copy_obj2, copy_meta2 = store.retrieve_pandas_object("test_df") - assert set(store.keys) == set(('/test_obj','/test_df')) + assert set(store.keys) == set(("/test_obj", "/test_df")) assert np.alltrue(my_obj == copy_obj) assert np.alltrue(my_ob2 == copy_obj2) @@ -36,10 +38,6 @@ def test_serialization(): assert meta[k] == copy_meta[k] - assert len(copy_meta2)==0 + assert len(copy_meta2) == 0 os.remove(test_file) 
- - - - diff --git a/hawc_hal/util.py b/hawc_hal/util.py index 16c54d2..0a04a22 100644 --- a/hawc_hal/util.py +++ b/hawc_hal/util.py @@ -5,9 +5,7 @@ def cartesian(arrays): - return np.dstack( - np.meshgrid(*arrays, indexing='ij') - ).reshape(-1, len(arrays)) + return np.dstack(np.meshgrid(*arrays, indexing="ij")).reshape(-1, len(arrays)) def cartesian_(arrays, out=None): @@ -27,7 +25,7 @@ def cartesian_(arrays, out=None): out = np.zeros([n, len(arrays)], dtype=dtype) m = old_div(n, arrays[0].size) - out[:,0] = np.repeat(arrays[0], m) + out[:, 0] = np.repeat(arrays[0], m) if arrays[1:]: @@ -35,7 +33,7 @@ def cartesian_(arrays, out=None): for j in range(1, arrays[0].size): - out[j*m:(j+1)*m, 1:] = out[0:m, 1:] + out[j * m : (j + 1) * m, 1:] = out[0:m, 1:] return out From 67eac7a2942f7ba13269d82ee5076a394fee85c9 Mon Sep 17 00:00:00 2001 From: Ramiro Torres-Escobedo Date: Thu, 2 Feb 2023 16:59:44 +0800 Subject: [PATCH 2/9] syntax fix --- hawc_hal/response/response.py | 36 +++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/hawc_hal/response/response.py b/hawc_hal/response/response.py index 7f37bff..3cac23e 100644 --- a/hawc_hal/response/response.py +++ b/hawc_hal/response/response.py @@ -38,7 +38,8 @@ def hawc_response_factory(response_file_name): # See if this response is in the cache, if not build it - if not response_file_name in _instances: + # if not response_file_name in _instances: + if response_file_name not in _instances: # log.info("Creating singleton for %s" % response_file_name) log.info(f"Creating singleton for {response_file_name}") @@ -65,9 +66,10 @@ def hawc_response_factory(response_file_name): _instances[response_file_name] = new_instance - # return the response, whether it was already in the cache or we just built it + # return the response, whether it was already in the cache or we just built it - return _instances[response_file_name] # type: HAWCResponse + # return _instances[response_file_name] # type HAWCResponse + return _instances[response_file_name] class HAWCResponse(object): @@ -144,7 +146,9 @@ def from_hdf5(cls, response_file_name): this_effarea_df = effarea_dfs.loc[dec_center, energy_bin] sim_energy_bin_low = this_effarea_df.loc[:, "sim_energy_bin_low"].values - sim_energy_bin_centers = this_effarea_df.loc[:, "sim_energy_bin_centers"].values + sim_energy_bin_centers = this_effarea_df.loc[ + :, "sim_energy_bin_centers" + ].values sim_energy_bin_hi = this_effarea_df.loc[:, "sim_energy_bin_hi"].values sim_differential_photon_fluxes = this_effarea_df.loc[ :, "sim_differential_photon_fluxes" @@ -153,7 +157,9 @@ def from_hdf5(cls, response_file_name): :, "sim_signal_events_per_bin" ].values - this_psf = PSFWrapper.from_pandas(psf_dfs.loc[dec_center, energy_bin, :]) + this_psf = PSFWrapper.from_pandas( + psf_dfs.loc[dec_center, energy_bin, :] + ) this_response_bin = ResponseBin( energy_bin, @@ -206,7 +212,9 @@ def from_root_file(cls, response_file_name: Path): if not file_existing_and_readable(response_file_name): # pragma: no cover # raise IOError("Response %s does not exist or is not readable" % response_file_name) - raise IOError(f"Response {response_file_name} does not exist or is not readable") + raise IOError( + f"Response {response_file_name} does not exist or is not readable" + ) # Read response with uproot.open(str(response_file_name)) as response_file: @@ -222,7 +230,9 @@ def from_root_file(cls, response_file_name: Path): dec_bins_upper_edge = response_file["DecBins/upperEdge"].array().to_numpy() dec_bins_center = 
response_file["DecBins/simdec"].array().to_numpy() - dec_bins = list(zip(dec_bins_lower_edge, dec_bins_center, dec_bins_upper_edge)) + dec_bins = list( + zip(dec_bins_lower_edge, dec_bins_center, dec_bins_upper_edge) + ) try: @@ -232,7 +242,9 @@ def from_root_file(cls, response_file_name: Path): try: - response_bin_ids = response_file["AnalysisBins/id"].array().to_numpy() + response_bin_ids = ( + response_file["AnalysisBins/id"].array().to_numpy() + ) except uproot.KeyInFileError: @@ -258,7 +270,9 @@ def from_root_file(cls, response_file_name: Path): dec_id_label = f"dec_{dec_id:02d}" # count the number of keys if name of bins wasn't obtained before - n_energy_bins = len(response_file[dec_id_label].keys(recursive=False)) + n_energy_bins = len( + response_file[dec_id_label].keys(recursive=False) + ) response_bins_ids = list(range(n_energy_bins)) @@ -373,7 +387,9 @@ def display(self, verbose=False): if verbose: log.info(self._dec_bins) # log.info("Number of energy/nHit planes per dec bin_name: %s" % (self.n_energy_planes)) - log.info(f"Number of energy/nHit planes per dec bin_name: {self.n_energy_planes}") + log.info( + f"Number of energy/nHit planes per dec bin_name: {self.n_energy_planes}" + ) if verbose: log.info(list(self._response_bins.values())[0].keys()) From 83ca95c0945e5e7450dcb1689462b47298f5b7bf Mon Sep 17 00:00:00 2001 From: Ramiro Torres-Escobedo Date: Fri, 2 Jun 2023 23:18:33 +0800 Subject: [PATCH 3/9] adding scripts directory to be included during HAL installation --- setup.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/setup.py b/setup.py index 39d20de..455d1c7 100644 --- a/setup.py +++ b/setup.py @@ -1,17 +1,16 @@ +import os +import os.path + from setuptools import setup -import os, os.path # Create list of data files def find_data_files(directory): - paths = [] - for (path, directories, filenames) in os.walk(directory): - + for path, directories, filenames in os.walk(directory): for filename in filenames: - paths.append(os.path.join("..", path, filename)) return paths @@ -34,6 +33,7 @@ def find_data_files(directory): "hawc_hal/region_of_interest", "hawc_hal/convenience_functions", "hawc_hal/tests", + "hawc_hal/scripts", ], url="https://github.com/threeML/hawc_hal", license="BSD-3.0", From 832cc366ac03fd4de281d14a75f12f437ac9ce0c Mon Sep 17 00:00:00 2001 From: Ramiro Torres-Escobedo Date: Fri, 2 Jun 2023 23:36:38 +0800 Subject: [PATCH 4/9] Revert "syntax fix" This reverts commit 67eac7a2942f7ba13269d82ee5076a394fee85c9. 
--- hawc_hal/response/response.py | 36 ++++++++++------------------------- 1 file changed, 10 insertions(+), 26 deletions(-) diff --git a/hawc_hal/response/response.py b/hawc_hal/response/response.py index 3cac23e..7f37bff 100644 --- a/hawc_hal/response/response.py +++ b/hawc_hal/response/response.py @@ -38,8 +38,7 @@ def hawc_response_factory(response_file_name): # See if this response is in the cache, if not build it - # if not response_file_name in _instances: - if response_file_name not in _instances: + if not response_file_name in _instances: # log.info("Creating singleton for %s" % response_file_name) log.info(f"Creating singleton for {response_file_name}") @@ -66,10 +65,9 @@ def hawc_response_factory(response_file_name): _instances[response_file_name] = new_instance - # return the response, whether it was already in the cache or we just built it + # return the response, whether it was already in the cache or we just built it - # return _instances[response_file_name] # type HAWCResponse - return _instances[response_file_name] + return _instances[response_file_name] # type: HAWCResponse class HAWCResponse(object): @@ -146,9 +144,7 @@ def from_hdf5(cls, response_file_name): this_effarea_df = effarea_dfs.loc[dec_center, energy_bin] sim_energy_bin_low = this_effarea_df.loc[:, "sim_energy_bin_low"].values - sim_energy_bin_centers = this_effarea_df.loc[ - :, "sim_energy_bin_centers" - ].values + sim_energy_bin_centers = this_effarea_df.loc[:, "sim_energy_bin_centers"].values sim_energy_bin_hi = this_effarea_df.loc[:, "sim_energy_bin_hi"].values sim_differential_photon_fluxes = this_effarea_df.loc[ :, "sim_differential_photon_fluxes" @@ -157,9 +153,7 @@ def from_hdf5(cls, response_file_name): :, "sim_signal_events_per_bin" ].values - this_psf = PSFWrapper.from_pandas( - psf_dfs.loc[dec_center, energy_bin, :] - ) + this_psf = PSFWrapper.from_pandas(psf_dfs.loc[dec_center, energy_bin, :]) this_response_bin = ResponseBin( energy_bin, @@ -212,9 +206,7 @@ def from_root_file(cls, response_file_name: Path): if not file_existing_and_readable(response_file_name): # pragma: no cover # raise IOError("Response %s does not exist or is not readable" % response_file_name) - raise IOError( - f"Response {response_file_name} does not exist or is not readable" - ) + raise IOError(f"Response {response_file_name} does not exist or is not readable") # Read response with uproot.open(str(response_file_name)) as response_file: @@ -230,9 +222,7 @@ def from_root_file(cls, response_file_name: Path): dec_bins_upper_edge = response_file["DecBins/upperEdge"].array().to_numpy() dec_bins_center = response_file["DecBins/simdec"].array().to_numpy() - dec_bins = list( - zip(dec_bins_lower_edge, dec_bins_center, dec_bins_upper_edge) - ) + dec_bins = list(zip(dec_bins_lower_edge, dec_bins_center, dec_bins_upper_edge)) try: @@ -242,9 +232,7 @@ def from_root_file(cls, response_file_name: Path): try: - response_bin_ids = ( - response_file["AnalysisBins/id"].array().to_numpy() - ) + response_bin_ids = response_file["AnalysisBins/id"].array().to_numpy() except uproot.KeyInFileError: @@ -270,9 +258,7 @@ def from_root_file(cls, response_file_name: Path): dec_id_label = f"dec_{dec_id:02d}" # count the number of keys if name of bins wasn't obtained before - n_energy_bins = len( - response_file[dec_id_label].keys(recursive=False) - ) + n_energy_bins = len(response_file[dec_id_label].keys(recursive=False)) response_bins_ids = list(range(n_energy_bins)) @@ -387,9 +373,7 @@ def display(self, verbose=False): if verbose: 
log.info(self._dec_bins) # log.info("Number of energy/nHit planes per dec bin_name: %s" % (self.n_energy_planes)) - log.info( - f"Number of energy/nHit planes per dec bin_name: {self.n_energy_planes}" - ) + log.info(f"Number of energy/nHit planes per dec bin_name: {self.n_energy_planes}") if verbose: log.info(list(self._response_bins.values())[0].keys()) From 2cdbf1cf427bf4a5ba489423c0417a137aa57bab Mon Sep 17 00:00:00 2001 From: Ramiro Torres-Escobedo Date: Fri, 2 Jun 2023 23:41:48 +0800 Subject: [PATCH 5/9] Revert "formatting code" This reverts commit 3657df54c5dbc781643ae650bcc84a7d4b246a96. --- hawc_hal/HAL.py | 143 +++-------- .../convenience_functions/fit_point_source.py | 33 +-- hawc_hal/convolved_source/__init__.py | 5 +- .../convolved_extended_source.py | 120 +++------ .../convolved_point_source.py | 91 +++---- .../convolved_source_container.py | 2 - hawc_hal/flat_sky_projection.py | 31 +-- hawc_hal/healpix_handling/__init__.py | 2 +- .../healpix_handling/flat_sky_to_healpix.py | 40 ++- .../healpix_handling/gnomonic_projection.py | 63 +++-- hawc_hal/healpix_handling/healpix_utils.py | 2 +- hawc_hal/healpix_handling/sparse_healpix.py | 9 +- hawc_hal/interpolation/__init__.py | 2 +- .../fast_bilinar_interpolation.py | 31 +-- hawc_hal/interpolation/irregular_grid.py | 7 +- .../interpolation/log_log_interpolation.py | 5 +- hawc_hal/log_likelihood.py | 16 +- hawc_hal/maptree/data_analysis_bin.py | 37 +-- hawc_hal/maptree/map_tree.py | 18 +- hawc_hal/obsolete/fast_convolution.py | 34 ++- hawc_hal/obsolete/image_to_healpix.py | 44 +--- hawc_hal/obsolete/tf1_wrapper.py | 5 +- hawc_hal/obsolete/ts_map.py | 235 ++++++++---------- hawc_hal/psf_fast/__init__.py | 2 +- hawc_hal/psf_fast/psf_convolutor.py | 32 +-- hawc_hal/psf_fast/psf_interpolator.py | 48 ++-- hawc_hal/psf_fast/psf_wrapper.py | 12 +- hawc_hal/region_of_interest/__init__.py | 2 +- .../region_of_interest/healpix_cone_roi.py | 48 ++-- .../region_of_interest/healpix_map_roi.py | 107 +++----- .../region_of_interest/healpix_roi_base.py | 21 +- hawc_hal/response/__init__.py | 2 +- hawc_hal/response/response_bin.py | 23 +- hawc_hal/serialize.py | 9 +- hawc_hal/sphere_dist.py | 2 +- hawc_hal/tests/conftest.py | 12 +- hawc_hal/tests/test_bilinar_interpolation.py | 8 +- hawc_hal/tests/test_complete_analysis.py | 48 ++-- hawc_hal/tests/test_copy.py | 11 +- hawc_hal/tests/test_geminga_paper.py | 185 +++++--------- hawc_hal/tests/test_healpixRoi.py | 161 ++++++------ hawc_hal/tests/test_maptree.py | 17 +- hawc_hal/tests/test_model_residual_maps.py | 44 ++-- hawc_hal/tests/test_n_transits.py | 4 +- hawc_hal/tests/test_on_point_source.py | 61 ++--- hawc_hal/tests/test_psf.py | 1 - hawc_hal/tests/test_roi.py | 104 ++++---- hawc_hal/tests/test_root_to_hdf.py | 22 +- hawc_hal/tests/test_serialization.py | 28 ++- hawc_hal/util.py | 8 +- 50 files changed, 754 insertions(+), 1243 deletions(-) diff --git a/hawc_hal/HAL.py b/hawc_hal/HAL.py index 92bc520..22ed34f 100644 --- a/hawc_hal/HAL.py +++ b/hawc_hal/HAL.py @@ -82,9 +82,7 @@ def __init__( # Set up the flat-sky projection self.flat_sky_pixels_size = flat_sky_pixels_size - self._flat_sky_projection = self._roi.get_flat_sky_projection( - self.flat_sky_pixels_size - ) + self._flat_sky_projection = self._roi.get_flat_sky_projection(self.flat_sky_pixels_size) # Read map tree (data) self._maptree = map_tree_factory(maptree, roi=self._roi, n_transits=n_transits) @@ -208,9 +206,7 @@ def psf_integration_method(self, mode): def _setup_psf_convolutors(self): - central_response_bins = 
self._response.get_response_dec_bin( - self._roi.ra_dec_center[1] - ) + central_response_bins = self._response.get_response_dec_bin(self._roi.ra_dec_center[1]) self._psf_convolutors = collections.OrderedDict() for bin_id in central_response_bins: @@ -283,9 +279,7 @@ def set_active_measurements(self, bin_id_min=None, bin_id_max=None, bin_list=Non for this_bin in range(bin_id_min, bin_id_max + 1): this_bin = str(this_bin) if this_bin not in self._all_planes: - raise ValueError( - f"Bin {this_bin} is not contained in this maptree." - ) + raise ValueError(f"Bin {this_bin} is not contained in this maptree.") self._active_planes.append(this_bin) @@ -303,9 +297,7 @@ def set_active_measurements(self, bin_id_min=None, bin_id_max=None, bin_list=Non # if not this_bin in self._all_planes: if this_bin not in self._all_planes: - raise ValueError( - f"Bin {this_bin} is not contained in this maptree." - ) + raise ValueError(f"Bin {this_bin} is not contained in this maptree.") self._active_planes.append(this_bin) @@ -460,9 +452,7 @@ def get_excess_background(self, ra: float, dec: float, radius: float): # each radial bin data = data_analysis_bin.observation_map.as_partial() bkg = data_analysis_bin.background_map.as_partial() - mdl = self._get_model_map( - energy_id, n_point_sources, n_ext_sources - ).as_partial() + mdl = self._get_model_map(energy_id, n_point_sources, n_ext_sources).as_partial() # select counts only from the pixels within specifid distance from # origin of radial profile @@ -529,10 +519,7 @@ def get_radial_profile( # The area of each ring is then given by the difference between two # subsequent circe areas. area = np.array( - [ - self.get_excess_background(ra, dec, r + offset * delta_r)[0] - for r in radii - ] + [self.get_excess_background(ra, dec, r + offset * delta_r)[0] for r in radii] ) temp = area[1:] - area[:-1] @@ -540,10 +527,7 @@ def get_radial_profile( # signals signal = np.array( - [ - self.get_excess_background(ra, dec, r + offset * delta_r)[1] - for r in radii - ] + [self.get_excess_background(ra, dec, r + offset * delta_r)[1] for r in radii] ) temp = signal[1:] - signal[:-1] @@ -551,10 +535,7 @@ def get_radial_profile( # backgrounds bkg = np.array( - [ - self.get_excess_background(ra, dec, r + offset * delta_r)[2] - for r in radii - ] + [self.get_excess_background(ra, dec, r + offset * delta_r)[2] for r in radii] ) temp = bkg[1:] - bkg[:-1] @@ -565,10 +546,7 @@ def get_radial_profile( # model # convert 'top hat' excess into 'ring' excesses. model = np.array( - [ - self.get_excess_background(ra, dec, r + offset * delta_r)[3] - for r in radii - ] + [self.get_excess_background(ra, dec, r + offset * delta_r)[3] for r in radii] ) temp = model[1:] - model[:-1] @@ -579,10 +557,7 @@ def get_radial_profile( self.set_model(model_to_subtract) model_subtract = np.array( - [ - self.get_excess_background(ra, dec, r + offset * delta_r)[3] - for r in radii - ] + [self.get_excess_background(ra, dec, r + offset * delta_r)[3] for r in radii] ) temp = model_subtract[1:] - model_subtract[:-1] @@ -602,17 +577,11 @@ def get_radial_profile( # them to the data later. Weight is normalized (sum of weights over # the bins = 1). 
- total_excess = np.array(self.get_excess_background(ra, dec, max_radius)[1])[ - good_planes - ] + total_excess = np.array(self.get_excess_background(ra, dec, max_radius)[1])[good_planes] - total_bkg = np.array(self.get_excess_background(ra, dec, max_radius)[2])[ - good_planes - ] + total_bkg = np.array(self.get_excess_background(ra, dec, max_radius)[2])[good_planes] - total_model = np.array(self.get_excess_background(ra, dec, max_radius)[3])[ - good_planes - ] + total_model = np.array(self.get_excess_background(ra, dec, max_radius)[3])[good_planes] w = np.divide(total_model, total_bkg) weight = np.array([w / np.sum(w) for _ in radii]) @@ -666,13 +635,7 @@ def plot_radial_profile( values for easy retrieval """ - ( - radii, - excess_model, - excess_data, - excess_error, - plane_ids, - ) = self.get_radial_profile( + (radii, excess_model, excess_data, excess_error, plane_ids,) = self.get_radial_profile( ra, dec, active_planes, @@ -704,9 +667,7 @@ def plot_radial_profile( plt.plot(radii, excess_model, color="red", label="Model") - plt.legend( - bbox_to_anchor=(1.0, 1.0), loc="upper right", numpoints=1, fontsize=16 - ) + plt.legend(bbox_to_anchor=(1.0, 1.0), loc="upper right", numpoints=1, fontsize=16) plt.axhline(0, color="deepskyblue", linestyle="--") x_limits = [0, max_radius] @@ -816,9 +777,7 @@ def display_spectrum(self): yerr = [yerr_high, yerr_low] - return self._plot_spectrum( - net_counts, yerr, model_only, residuals, residuals_err - ) + return self._plot_spectrum(net_counts, yerr, model_only, residuals, residuals_err) def _plot_spectrum(self, net_counts, yerr, model_only, residuals, residuals_err): @@ -892,9 +851,7 @@ def get_log_like(self, individual_bins=False, return_null=False): bkg_renorm = list(self._nuisance_parameters.values())[0].value obs = data_analysis_bin.observation_map.as_partial() # type: np.array - bkg = ( - data_analysis_bin.background_map.as_partial() * bkg_renorm - ) # type: np.array + bkg = data_analysis_bin.background_map.as_partial() * bkg_renorm # type: np.array this_pseudo_log_like = log_likelihood(obs, bkg, this_model_map_hpx) @@ -987,9 +944,7 @@ def get_simulated_dataset(self, name): # Active plane. 
Generate new data expectation = self._clone[1][bin_id] - new_data = np.random.poisson( - expectation, size=(1, expectation.shape[0]) - ).flatten() + new_data = np.random.poisson(expectation, size=(1, expectation.shape[0])).flatten() # Substitute data data_analysis_bin.observation_map.set_new_values(new_data) @@ -999,18 +954,16 @@ def get_simulated_dataset(self, name): # Adjust the name of the nuisance parameter old_name = list(self._clone[0]._nuisance_parameters.keys())[0] new_name = old_name.replace(self.name, name) - self._clone[0]._nuisance_parameters[new_name] = self._clone[ - 0 - ]._nuisance_parameters.pop(old_name) + self._clone[0]._nuisance_parameters[new_name] = self._clone[0]._nuisance_parameters.pop( + old_name + ) # Recompute biases self._clone[0]._compute_likelihood_biases() return self._clone[0] - def _get_expectation( - self, data_analysis_bin, energy_bin_id, n_point_sources, n_ext_sources - ): + def _get_expectation(self, data_analysis_bin, energy_bin_id, n_point_sources, n_ext_sources): # Compute the expectation from the model @@ -1026,9 +979,7 @@ def _get_expectation( psf_integration_method=self._psf_integration_method, ) - expectation_from_this_source = ( - expectation_per_transit * data_analysis_bin.n_transits - ) + expectation_from_this_source = expectation_per_transit * data_analysis_bin.n_transits if this_model_map is None: @@ -1067,18 +1018,14 @@ def _get_expectation( # Only extended sources this_model_map = ( - self._psf_convolutors[energy_bin_id].extended_source_image( - this_ext_model_map - ) + self._psf_convolutors[energy_bin_id].extended_source_image(this_ext_model_map) * data_analysis_bin.n_transits ) else: this_model_map += ( - self._psf_convolutors[energy_bin_id].extended_source_image( - this_ext_model_map - ) + self._psf_convolutors[energy_bin_id].extended_source_image(this_ext_model_map) * data_analysis_bin.n_transits ) @@ -1088,18 +1035,14 @@ def _get_expectation( # First divide for the pixel area because we need to interpolate brightness # this_model_map = old_div(this_model_map, self._flat_sky_projection.project_plane_pixel_area) - this_model_map = ( - this_model_map / self._flat_sky_projection.project_plane_pixel_area - ) + this_model_map = this_model_map / self._flat_sky_projection.project_plane_pixel_area this_model_map_hpx = self._flat_sky_to_healpix_transform[energy_bin_id]( this_model_map, fill_value=0.0 ) # Now multiply by the pixel area of the new map to go back to flux - this_model_map_hpx *= hp.nside2pixarea( - data_analysis_bin.nside, degrees=True - ) + this_model_map_hpx *= hp.nside2pixarea(data_analysis_bin.nside, degrees=True) else: @@ -1174,9 +1117,7 @@ def display_fit(self, smoothing_kernel_sigma=0.1, display_colorbar=False): this_ra, this_dec = self._roi.ra_dec_center # Make a full healpix map for a second - whole_map = self._get_model_map( - plane_id, n_point_sources, n_ext_sources - ).as_dense() + whole_map = self._get_model_map(plane_id, n_point_sources, n_ext_sources).as_dense() # Healpix uses longitude between -180 and 180, while R.A. is between 0 and 360. 
We need to fix that: longitude = ra_to_longitude(this_ra) @@ -1185,9 +1126,7 @@ def display_fit(self, smoothing_kernel_sigma=0.1, display_colorbar=False): latitude = this_dec # Background and excess maps - bkg_subtracted, _, background_map = self._get_excess( - data_analysis_bin, all_maps=True - ) + bkg_subtracted, _, background_map = self._get_excess(data_analysis_bin, all_maps=True) # Make all the projections: model, excess, background, residuals proj_model = self._represent_healpix_map( @@ -1221,15 +1160,11 @@ def display_fit(self, smoothing_kernel_sigma=0.1, display_colorbar=False): vmax = max(np.nanmax(proj_model), np.nanmax(proj_data)) # Plot model - images[0] = subs[i][0].imshow( - proj_model, origin="lower", vmin=vmin, vmax=vmax - ) + images[0] = subs[i][0].imshow(proj_model, origin="lower", vmin=vmin, vmax=vmax) subs[i][0].set_title("model, bin {}".format(data_analysis_bin.name)) # Plot data map - images[1] = subs[i][1].imshow( - proj_data, origin="lower", vmin=vmin, vmax=vmax - ) + images[1] = subs[i][1].imshow(proj_data, origin="lower", vmin=vmin, vmax=vmax) subs[i][1].set_title("excess, bin {}".format(data_analysis_bin.name)) # Plot background map. @@ -1352,9 +1287,7 @@ def _get_model_map(self, plane_id, n_pt_src, n_ext_src): raise ValueError(f"{plane_id} not a plane in the current model") model_map = SparseHealpix( - self._get_expectation( - self._maptree[plane_id], plane_id, n_pt_src, n_ext_src - ), + self._get_expectation(self._maptree[plane_id], plane_id, n_pt_src, n_ext_src), self._active_pixels[plane_id], self._maptree[plane_id].observation_map.nside, ) @@ -1428,17 +1361,13 @@ def _write_a_map(self, file_name, which, fluctuate=False, return_map=False): if return_map: return new_map_tree - def write_model_map( - self, file_name, poisson_fluctuate=False, test_return_map=False - ): + def write_model_map(self, file_name, poisson_fluctuate=False, test_return_map=False): """ This function writes the model map to a file. The interface is based off of HAWCLike for consistency """ if test_return_map: - log.warning( - "test_return_map=True should only be used for testing purposes!" - ) + log.warning("test_return_map=True should only be used for testing purposes!") return self._write_a_map(file_name, "model", poisson_fluctuate, test_return_map) def write_residual_map(self, file_name, test_return_map=False): @@ -1447,7 +1376,5 @@ def write_residual_map(self, file_name, test_return_map=False): The interface is based off of HAWCLike for consistency """ if test_return_map: - log.warning( - "test_return_map=True should only be used for testing purposes!" 
- ) + log.warning("test_return_map=True should only be used for testing purposes!") return self._write_a_map(file_name, "residual", False, test_return_map) diff --git a/hawc_hal/convenience_functions/fit_point_source.py b/hawc_hal/convenience_functions/fit_point_source.py index a1a44ec..abb7f07 100644 --- a/hawc_hal/convenience_functions/fit_point_source.py +++ b/hawc_hal/convenience_functions/fit_point_source.py @@ -3,24 +3,26 @@ from threeML import DataList, JointLikelihood -def fit_point_source( - roi, - maptree, - response, - point_source_model, - bin_list, - confidence_intervals=False, - liff=False, - pixel_size=0.17, - verbose=False, -): +def fit_point_source(roi, + maptree, + response, + point_source_model, + bin_list, + confidence_intervals=False, + liff=False, + pixel_size=0.17, + verbose=False): data_radius = roi.data_radius.to("deg").value if not liff: # This is a 3ML plugin - hawc = HAL("HAWC", maptree, response, roi, flat_sky_pixels_size=pixel_size) + hawc = HAL("HAWC", + maptree, + response, + roi, + flat_sky_pixels_size=pixel_size) hawc.set_active_measurements(bin_list=bin_list) @@ -28,7 +30,10 @@ def fit_point_source( from threeML import HAWCLike - hawc = HAWCLike("HAWC", maptree, response, fullsky=True) + hawc = HAWCLike("HAWC", + maptree, + response, + fullsky=True) hawc.set_bin_list(bin_list) @@ -64,4 +69,4 @@ def fit_point_source( ci = None - return param_df, like_df, ci, jl.results + return param_df, like_df, ci, jl.results \ No newline at end of file diff --git a/hawc_hal/convolved_source/__init__.py b/hawc_hal/convolved_source/__init__.py index dff1d09..d680135 100644 --- a/hawc_hal/convolved_source/__init__.py +++ b/hawc_hal/convolved_source/__init__.py @@ -1,7 +1,4 @@ from __future__ import absolute_import from .convolved_source_container import ConvolvedSourcesContainer from .convolved_point_source import ConvolvedPointSource -from .convolved_extended_source import ( - ConvolvedExtendedSource2D, - ConvolvedExtendedSource3D, -) +from .convolved_extended_source import ConvolvedExtendedSource2D, ConvolvedExtendedSource3D \ No newline at end of file diff --git a/hawc_hal/convolved_source/convolved_extended_source.py b/hawc_hal/convolved_source/convolved_extended_source.py index 819feda..f76cf58 100644 --- a/hawc_hal/convolved_source/convolved_extended_source.py +++ b/hawc_hal/convolved_source/convolved_extended_source.py @@ -6,7 +6,6 @@ from astromodels import use_astromodels_memoization from threeML.io.logging import setup_logger - log = setup_logger(__name__) log.propagate = False @@ -25,12 +24,12 @@ def _select_with_wrap_around(arr, start, stop, wrap=(360, 0)): return idx - # Conversion factor between deg^2 and rad^2 deg2_to_rad2 = 0.00030461741978670857 class ConvolvedExtendedSource(object): + def __init__(self, source, response, flat_sky_projection): self._response = response @@ -44,12 +43,7 @@ def __init__(self, source, response, flat_sky_projection): # Find out the response bins we need to consider for this extended source # # Get the footprint (i..e, the coordinates of the 4 points limiting the projections) - ( - (ra1, dec1), - (ra2, dec2), - (ra3, dec3), - (ra4, dec4), - ) = flat_sky_projection.wcs.calc_footprint() + (ra1, dec1), (ra2, dec2), (ra3, dec3), (ra4, dec4) = flat_sky_projection.wcs.calc_footprint() (lon_start, lon_stop), (lat_start, lat_stop) = source.get_boundaries() @@ -62,68 +56,44 @@ def __init__(self, source, response, flat_sky_projection): upper_edges = np.array([x[-1] for x in response.dec_bins]) centers = np.array([x[1] for x in 
response.dec_bins]) - dec_bins_to_consider_idx = np.flatnonzero( - (upper_edges >= dec_min) & (lower_edges <= dec_max) - ) + dec_bins_to_consider_idx = np.flatnonzero((upper_edges >= dec_min) & (lower_edges <= dec_max)) # Wrap the selection so we have always one bin before and one after. # NOTE: we assume that the ROI do not overlap with the very first or the very last dec bin # Add one dec bin to cover the last part - dec_bins_to_consider_idx = np.append( - dec_bins_to_consider_idx, [dec_bins_to_consider_idx[-1] + 1] - ) + dec_bins_to_consider_idx = np.append(dec_bins_to_consider_idx, [dec_bins_to_consider_idx[-1] + 1]) # Add one dec bin to cover the first part - dec_bins_to_consider_idx = np.insert( - dec_bins_to_consider_idx, 0, [dec_bins_to_consider_idx[0] - 1] - ) + dec_bins_to_consider_idx = np.insert(dec_bins_to_consider_idx, 0, [dec_bins_to_consider_idx[0] - 1]) - self._dec_bins_to_consider = [ - response.response_bins[centers[x]] for x in dec_bins_to_consider_idx - ] + self._dec_bins_to_consider = [response.response_bins[centers[x]] for x in dec_bins_to_consider_idx] - log.info( - "Considering %i dec bins for extended source %s" - % (len(self._dec_bins_to_consider), self._name) - ) + log.info("Considering %i dec bins for extended source %s" % (len(self._dec_bins_to_consider), + self._name)) # Find central bin for the PSF dec_center = (lat_start + lat_stop) / 2.0 # - self._central_response_bins = response.get_response_dec_bin( - dec_center, interpolate=False - ) + self._central_response_bins = response.get_response_dec_bin(dec_center, interpolate=False) log.info("Central bin is bin at Declination = %.3f" % dec_center) # Take note of the pixels within the flat sky projection that actually need to be computed. If the extended # source is significantly smaller than the flat sky projection, this gains a substantial amount of time - idx_lon = _select_with_wrap_around( - self._flat_sky_projection.ras, lon_start, lon_stop, (360, 0) - ) - idx_lat = _select_with_wrap_around( - self._flat_sky_projection.decs, lat_start, lat_stop, (90, -90) - ) + idx_lon = _select_with_wrap_around(self._flat_sky_projection.ras, lon_start, lon_stop, (360, 0)) + idx_lat = _select_with_wrap_around(self._flat_sky_projection.decs, lat_start, lat_stop, (90, -90)) - self._active_flat_sky_mask = idx_lon & idx_lat + self._active_flat_sky_mask = (idx_lon & idx_lat) - assert np.sum(self._active_flat_sky_mask) > 0, ( - "Mismatch between source %s and ROI" % self._name - ) + assert np.sum(self._active_flat_sky_mask) > 0, "Mismatch between source %s and ROI" % self._name # Get the energies needed for the computation of the flux - self._energy_centers_keV = ( - self._central_response_bins[ - list(self._central_response_bins.keys())[0] - ].sim_energy_bin_centers - * 1e9 - ) + self._energy_centers_keV = self._central_response_bins[list(self._central_response_bins.keys())[0]].sim_energy_bin_centers * 1e9 # Prepare array for fluxes - self._all_fluxes = np.zeros( - (self._flat_sky_projection.ras.shape[0], self._energy_centers_keV.shape[0]) - ) + self._all_fluxes = np.zeros((self._flat_sky_projection.ras.shape[0], + self._energy_centers_keV.shape[0])) def _setup_callbacks(self, callback): @@ -148,11 +118,10 @@ def get_source_map(self, energy_bin_id, tag=None): class ConvolvedExtendedSource3D(ConvolvedExtendedSource): + def __init__(self, source, response, flat_sky_projection): - super(ConvolvedExtendedSource3D, self).__init__( - source, response, flat_sky_projection - ) + super(ConvolvedExtendedSource3D, self).__init__(source, 
response, flat_sky_projection) # We implement a caching system so that the source flux is evaluated only when strictly needed, # because it is the most computationally intense part otherwise. @@ -185,76 +154,51 @@ def get_source_map(self, energy_bin_id, tag=None): # Recompute the fluxes for the pixels that are covered by this extended source self._all_fluxes[self._active_flat_sky_mask, :] = self._source( - self._flat_sky_projection.ras[self._active_flat_sky_mask], - self._flat_sky_projection.decs[self._active_flat_sky_mask], - self._energy_centers_keV, - ) # 1 / (keV cm^2 s rad^2) + self._flat_sky_projection.ras[self._active_flat_sky_mask], + self._flat_sky_projection.decs[self._active_flat_sky_mask], + self._energy_centers_keV) # 1 / (keV cm^2 s rad^2) # We don't need to recompute the function anymore until a parameter changes self._recompute_flux = False # Now compute the expected signal - pixel_area_rad2 = ( - self._flat_sky_projection.project_plane_pixel_area * deg2_to_rad2 - ) + pixel_area_rad2 = self._flat_sky_projection.project_plane_pixel_area * deg2_to_rad2 this_model_image = np.zeros(self._all_fluxes.shape[0]) # Loop over the Dec bins that cover this source and compute the expected flux, interpolating between # two dec bins for each point - for dec_bin1, dec_bin2 in zip( - self._dec_bins_to_consider[:-1], self._dec_bins_to_consider[1:] - ): + for dec_bin1, dec_bin2 in zip(self._dec_bins_to_consider[:-1], self._dec_bins_to_consider[1:]): # Get the two response bins to consider this_response_bin1 = dec_bin1[energy_bin_id] this_response_bin2 = dec_bin2[energy_bin_id] # Figure out which pixels are between the centers of the dec bins we are considering - c1, c2 = ( - this_response_bin1.declination_center, - this_response_bin2.declination_center, - ) + c1, c2 = this_response_bin1.declination_center, this_response_bin2.declination_center - idx = ( - (self._flat_sky_projection.decs >= c1) - & (self._flat_sky_projection.decs < c2) - & self._active_flat_sky_mask - ) + idx = (self._flat_sky_projection.decs >= c1) & (self._flat_sky_projection.decs < c2) & \ + self._active_flat_sky_mask # Reweight the spectrum separately for the two bins # NOTE: the scale is the same because the sim_differential_photon_fluxes are the same (the simulation # used to make the response used the same spectrum for each bin). 
What changes between the two bins # is the observed signal per bin (the .sim_signal_events_per_bin member) - scale = old_div( - (self._all_fluxes[idx, :] * pixel_area_rad2), - this_response_bin1.sim_differential_photon_fluxes, - ) + scale = old_div((self._all_fluxes[idx, :] * pixel_area_rad2), this_response_bin1.sim_differential_photon_fluxes) # Compute the interpolation weights for the two responses w1 = old_div((self._flat_sky_projection.decs[idx] - c2), (c1 - c2)) w2 = old_div((self._flat_sky_projection.decs[idx] - c1), (c2 - c1)) - this_model_image[idx] = ( - w1 - * np.sum( - scale * this_response_bin1.sim_signal_events_per_bin, axis=1 - ) - + w2 - * np.sum( - scale * this_response_bin2.sim_signal_events_per_bin, axis=1 - ) - ) * 1e9 + this_model_image[idx] = (w1 * np.sum(scale * this_response_bin1.sim_signal_events_per_bin, axis=1) + + w2 * np.sum(scale * this_response_bin2.sim_signal_events_per_bin, axis=1)) * \ + 1e9 # Reshape the flux array into an image - this_model_image = this_model_image.reshape( - ( - self._flat_sky_projection.npix_height, - self._flat_sky_projection.npix_width, - ) - ).T + this_model_image = this_model_image.reshape((self._flat_sky_projection.npix_height, + self._flat_sky_projection.npix_width)).T return this_model_image diff --git a/hawc_hal/convolved_source/convolved_point_source.py b/hawc_hal/convolved_source/convolved_point_source.py index 2bcbbf0..219a640 100644 --- a/hawc_hal/convolved_source/convolved_point_source.py +++ b/hawc_hal/convolved_source/convolved_point_source.py @@ -11,12 +11,12 @@ from ..interpolation.log_log_interpolation import LogLogInterpolator from threeML.io.logging import setup_logger - log = setup_logger(__name__) log.propagate = False class ConvolvedPointSource(object): + def __init__(self, source, response, flat_sky_projection): assert isinstance(source, PointSource) @@ -48,29 +48,21 @@ def _update_dec_bins(self, dec_src): if abs(dec_src - self._last_processed_position[1]) > 0.1: # Source moved by more than 0.1 deg, let's recompute the response and the PSF - self._response_energy_bins = self._response.get_response_dec_bin( - dec_src, interpolate=True - ) + self._response_energy_bins = self._response.get_response_dec_bin(dec_src, interpolate=True) # Setup the PSF interpolators self._psf_interpolators = collections.OrderedDict() for bin_id in self._response_energy_bins: - self._psf_interpolators[bin_id] = PSFInterpolator( - self._response_energy_bins[bin_id].psf, self._flat_sky_projection - ) + self._psf_interpolators[bin_id] = PSFInterpolator(self._response_energy_bins[bin_id].psf, + self._flat_sky_projection) - def get_source_map( - self, response_bin_id, tag=None, integrate=False, psf_integration_method="fast" - ): + def get_source_map(self, response_bin_id, tag=None, integrate=False, psf_integration_method='fast'): # Get current point source position # NOTE: this might change if the point source position is free during the fit, # that's why it is here - ra_src, dec_src = ( - self._source.position.ra.value, - self._source.position.dec.value, - ) + ra_src, dec_src = self._source.position.ra.value, self._source.position.dec.value if (ra_src, dec_src) != self._last_processed_position: @@ -86,26 +78,21 @@ def get_source_map( # Get the PSF image # This is cached inside the PSF class, so that if the position doesn't change this line # is very fast - this_map = psf_interpolator.point_source_image( - ra_src, dec_src, psf_integration_method - ) + this_map = psf_interpolator.point_source_image(ra_src, dec_src, psf_integration_method) # 
Check that the point source image is entirely contained in the ROI, if not print a warning map_sum = this_map.sum() if not np.isclose(map_sum, 1.0, rtol=1e-2): - log.warning( - "PSF for source %s is not entirely contained " - "in ROI for response bin %s. Fraction is %.2f instead of 1.0. " - "Consider enlarging your model ROI." - % (self._name, response_bin_id, map_sum) - ) + log.warning("PSF for source %s is not entirely contained " + "in ROI for response bin %s. Fraction is %.2f instead of 1.0. " + "Consider enlarging your model ROI." % (self._name, + response_bin_id, + map_sum)) # Compute the fluxes from the spectral function at the same energies as the simulated function - energy_centers_keV = ( - response_energy_bin.sim_energy_bin_centers * 1e9 - ) # astromodels expects energies in keV + energy_centers_keV = response_energy_bin.sim_energy_bin_centers * 1e9 # astromodels expects energies in keV # This call needs to be here because the parameters of the model might change, # for example during a fit @@ -113,9 +100,7 @@ def get_source_map( source_diff_spectrum = self._source(energy_centers_keV, tag=tag) # This provide a way to force the use of integration, for testing - if ( - os.environ.get("HAL_INTEGRATE_POINT_SOURCE", "").lower() == "yes" - ): # pragma: no cover + if os.environ.get("HAL_INTEGRATE_POINT_SOURCE", '').lower() == 'yes': # pragma: no cover integrate = True @@ -125,46 +110,26 @@ def get_source_map( # It is off by default because it is slower and it doesn't provide any improvement in accuracy # according to my simulations (GV) - interp_spectrum = LogLogInterpolator( - response_energy_bin.sim_energy_bin_centers, - source_diff_spectrum * 1e9, - k=2, - ) - - interp_sim_spectrum = LogLogInterpolator( - response_energy_bin.sim_energy_bin_centers, - response_energy_bin.sim_differential_photon_fluxes, - k=2, - ) - - src_spectrum = [ - interp_spectrum.integral(a, b) - for (a, b) in zip( - response_energy_bin.sim_energy_bin_low, - response_energy_bin.sim_energy_bin_hi, - ) - ] - - sim_spectrum = [ - interp_sim_spectrum.integral(a, b) - for (a, b) in zip( - response_energy_bin.sim_energy_bin_low, - response_energy_bin.sim_energy_bin_hi, - ) - ] + interp_spectrum = LogLogInterpolator(response_energy_bin.sim_energy_bin_centers, + source_diff_spectrum * 1e9, + k=2) + + interp_sim_spectrum = LogLogInterpolator(response_energy_bin.sim_energy_bin_centers, + response_energy_bin.sim_differential_photon_fluxes, + k=2) + + src_spectrum = [interp_spectrum.integral(a, b) for (a, b) in zip(response_energy_bin.sim_energy_bin_low, + response_energy_bin.sim_energy_bin_hi)] + + sim_spectrum = [interp_sim_spectrum.integral(a, b) for (a, b) in zip(response_energy_bin.sim_energy_bin_low, + response_energy_bin.sim_energy_bin_hi)] scale = old_div(np.array(src_spectrum), np.array(sim_spectrum)) else: # Transform from keV^-1 cm^-2 s^-1 to TeV^-1 cm^-2 s^-1 and re-weight the detected counts - scale = ( - old_div( - source_diff_spectrum, - response_energy_bin.sim_differential_photon_fluxes, - ) - * 1e9 - ) + scale = old_div(source_diff_spectrum, response_energy_bin.sim_differential_photon_fluxes) * 1e9 # Now return the map multiplied by the scale factor return np.sum(scale * response_energy_bin.sim_signal_events_per_bin) * this_map diff --git a/hawc_hal/convolved_source/convolved_source_container.py b/hawc_hal/convolved_source/convolved_source_container.py index 6095293..4b15b09 100644 --- a/hawc_hal/convolved_source/convolved_source_container.py +++ b/hawc_hal/convolved_source/convolved_source_container.py @@ 
-1,6 +1,4 @@ from builtins import object - - class ConvolvedSourcesContainer(object): def __init__(self): diff --git a/hawc_hal/flat_sky_projection.py b/hawc_hal/flat_sky_projection.py index d9400a8..7b0bdd7 100644 --- a/hawc_hal/flat_sky_projection.py +++ b/hawc_hal/flat_sky_projection.py @@ -33,21 +33,11 @@ def _get_header(ra, dec, pixel_size, coordsys, h, w): assert 0 <= ra <= 360 - header = fits.Header.fromstring( - _fits_header - % ( - h, - w, - old_div(h, 2), - ra, - pixel_size, - old_div(w, 2), - dec, - pixel_size, - coordsys, - ), - sep="\n", - ) + header = fits.Header.fromstring(_fits_header % (h, w, + old_div(h, 2), ra, pixel_size, + old_div(w, 2), dec, pixel_size, + coordsys), + sep='\n') return header @@ -59,7 +49,8 @@ def _get_all_ra_dec(input_wcs, h, w): xx = np.arange(0.5, h + 0.5, 1, dtype=np.int16) yy = np.arange(0.5, w + 0.5, 1, dtype=np.int16) - _ij_grid = cartesian((xx, yy)) + _ij_grid = cartesian((xx, + yy)) # Convert pixel coordinates to world coordinates world = input_wcs.all_pix2world(_ij_grid, 0, ra_dec_order=True) @@ -68,6 +59,7 @@ def _get_all_ra_dec(input_wcs, h, w): class FlatSkyProjection(object): + def __init__(self, ra_center, dec_center, pixel_size_deg, npix_height, npix_width): assert npix_height % 2 == 0, "Number of height pixels must be even" @@ -94,11 +86,7 @@ def __init__(self, ra_center, dec_center, pixel_size_deg, npix_height, npix_widt # Build projection, i.e., a World Coordinate System object - self._wcs = WCS( - _get_header( - ra_center, dec_center, pixel_size_deg, "icrs", npix_height, npix_width - ) - ) + self._wcs = WCS(_get_header(ra_center, dec_center, pixel_size_deg, 'icrs', npix_height, npix_width)) # Pre-compute all R.A., Decs self._ras, self._decs = _get_all_ra_dec(self._wcs, npix_height, npix_width) @@ -252,3 +240,4 @@ def project_plane_pixel_area(self): # self._distance_cache[key] = (ds[fine_selection_idx], selection_idx) # # return self._distance_cache[key] + diff --git a/hawc_hal/healpix_handling/__init__.py b/hawc_hal/healpix_handling/__init__.py index 1332c85..09df17b 100644 --- a/hawc_hal/healpix_handling/__init__.py +++ b/hawc_hal/healpix_handling/__init__.py @@ -2,4 +2,4 @@ from .flat_sky_to_healpix import FlatSkyToHealpixTransform from .sparse_healpix import SparseHealpix, DenseHealpix from .gnomonic_projection import get_gnomonic_projection -from .healpix_utils import radec_to_vec +from .healpix_utils import radec_to_vec \ No newline at end of file diff --git a/hawc_hal/healpix_handling/flat_sky_to_healpix.py b/hawc_hal/healpix_handling/flat_sky_to_healpix.py index 6c508ae..4a4822e 100644 --- a/hawc_hal/healpix_handling/flat_sky_to_healpix.py +++ b/hawc_hal/healpix_handling/flat_sky_to_healpix.py @@ -15,16 +15,16 @@ ORDER = {} -ORDER["nearest-neighbor"] = 0 -ORDER["bilinear"] = 1 -ORDER["biquadratic"] = 2 -ORDER["bicubic"] = 3 +ORDER['nearest-neighbor'] = 0 +ORDER['bilinear'] = 1 +ORDER['biquadratic'] = 2 +ORDER['bicubic'] = 3 COORDSYS = { - "g": Galactic(), - "c": ICRS(), - "icrs": ICRS(), + 'g': Galactic(), + 'c': ICRS(), + 'icrs': ICRS(), } @@ -48,13 +48,14 @@ def _convert_world_coordinates(lon_in, lat_in, wcs_in, wcs_out): lon_out_unit = u.Unit(wcs_out.wcs.cunit[0]) lat_out_unit = u.Unit(wcs_out.wcs.cunit[1]) - data = UnitSphericalRepresentation(lon_in * lon_in_unit, lat_in * lat_in_unit) + data = UnitSphericalRepresentation(lon_in * lon_in_unit, + lat_in * lat_in_unit) coords_in = frame_in.realize_frame(data) coords_out = coords_in.transform_to(frame_out) - lon_out = 
coords_out.represent_as("unitspherical").lon.to(lon_out_unit).value - lat_out = coords_out.represent_as("unitspherical").lat.to(lat_out_unit).value + lon_out = coords_out.represent_as('unitspherical').lon.to(lon_out_unit).value + lat_out = coords_out.represent_as('unitspherical').lat.to(lat_out_unit).value return lon_out, lat_out @@ -68,31 +69,20 @@ class FlatSkyToHealpixTransform(object): the transformation. This avoids to re-compute the same quantities over and over again. """ - def __init__( - self, - wcs_in, - coord_system_out, - nside, - pixels_id, - input_shape, - order="bilinear", - nested=False, - ): + def __init__(self, wcs_in, coord_system_out, nside, pixels_id, input_shape, order='bilinear', nested=False): # Look up lon, lat of pixels in output system and convert colatitude theta # and longitude phi to longitude and latitude. theta, phi = hp.pix2ang(nside, pixels_id, nested) lon_out = np.degrees(phi) - lat_out = 90.0 - np.degrees(theta) + lat_out = 90. - np.degrees(theta) # Convert between celestial coordinates coord_system_out = _parse_coord_system(coord_system_out) - with np.errstate(invalid="ignore"): - lon_in, lat_in = _convert_world_coordinates( - lon_out, lat_out, (coord_system_out, u.deg, u.deg), wcs_in - ) + with np.errstate(invalid='ignore'): + lon_in, lat_in = _convert_world_coordinates(lon_out, lat_out, (coord_system_out, u.deg, u.deg), wcs_in) # Look up pixels in input system yinds, xinds = wcs_in.wcs_world2pix(lon_in, lat_in, 0) diff --git a/hawc_hal/healpix_handling/gnomonic_projection.py b/hawc_hal/healpix_handling/gnomonic_projection.py index 8e59820..c4a0fca 100644 --- a/hawc_hal/healpix_handling/gnomonic_projection.py +++ b/hawc_hal/healpix_handling/gnomonic_projection.py @@ -16,20 +16,18 @@ def get_gnomonic_projection(figure, hpx_map, **kwargs): :return: the array containing the projection. 
""" - defaults = { - "coord": "C", - "rot": None, - "format": "%g", - "flip": "astro", - "xsize": 200, - "ysize": None, - "reso": 1.5, - "nest": False, - "min": None, - "max": None, - "cmap": None, - "norm": None, - } + defaults = {'coord': 'C', + 'rot': None, + 'format': '%g', + 'flip': 'astro', + 'xsize': 200, + 'ysize': None, + 'reso': 1.5, + 'nest': False, + 'min': None, + 'max': None, + 'cmap': None, + 'norm': None} for key, default_value in list(defaults.items()): @@ -49,31 +47,26 @@ def get_gnomonic_projection(figure, hpx_map, **kwargs): # extent[3] - margins[3] - margins[1]) extent = (0.05, 0.05, 0.9, 0.9) - ax = PA.HpxGnomonicAxes( - figure, - extent, - coord=kwargs["coord"], - rot=kwargs["rot"], - format=kwargs["format"], - flipconv=kwargs["flip"], - ) + ax = PA.HpxGnomonicAxes(figure, extent, + coord=kwargs['coord'], + rot=kwargs['rot'], + format=kwargs['format'], + flipconv=kwargs['flip']) # Suppress warnings about nans with np.warnings.catch_warnings(): - np.warnings.filterwarnings("ignore") + np.warnings.filterwarnings('ignore') - img = ax.projmap( - hpx_map, - nest=kwargs["nest"], - coord=kwargs["coord"], - vmin=kwargs["min"], - vmax=kwargs["max"], - xsize=kwargs["xsize"], - ysize=kwargs["ysize"], - reso=kwargs["reso"], - cmap=kwargs["cmap"], - norm=kwargs["norm"], - ) + img = ax.projmap(hpx_map, + nest=kwargs['nest'], + coord=kwargs['coord'], + vmin=kwargs['min'], + vmax=kwargs['max'], + xsize=kwargs['xsize'], + ysize=kwargs['ysize'], + reso=kwargs['reso'], + cmap=kwargs['cmap'], + norm=kwargs['norm']) return img diff --git a/hawc_hal/healpix_handling/healpix_utils.py b/hawc_hal/healpix_handling/healpix_utils.py index 3a56e0d..e8d552c 100644 --- a/hawc_hal/healpix_handling/healpix_utils.py +++ b/hawc_hal/healpix_handling/healpix_utils.py @@ -25,4 +25,4 @@ def radec_to_vec(ra, dec): # ra = np.rad2deg(phi) # dec = np.rad2deg(0.5 * np.pi - theta) # -# return ra, dec +# return ra, dec \ No newline at end of file diff --git a/hawc_hal/healpix_handling/sparse_healpix.py b/hawc_hal/healpix_handling/sparse_healpix.py index f9d13c7..dd4907e 100644 --- a/hawc_hal/healpix_handling/sparse_healpix.py +++ b/hawc_hal/healpix_handling/sparse_healpix.py @@ -65,6 +65,7 @@ def to_pandas(self): class SparseHealpix(HealpixWrapperBase): + def __init__(self, partial_map, pixels_ids, nside, fill_value=UNSEEN): self._partial_map = partial_map @@ -81,7 +82,7 @@ def __add__(self, other_map): added = self.as_partial() + other_map.as_partial() sparse_added = SparseHealpix(added, self._pixels_ids, self.nside) - + return sparse_added def __sub__(self, other_map): @@ -126,6 +127,7 @@ def pixels_ids(self): return self._pixels_ids + class DenseHealpix(HealpixWrapperBase): """ A dense (fullsky) healpix map. In this case partial and complete are the same map. 
@@ -136,9 +138,7 @@ def __init__(self, healpix_array): self._dense_map = healpix_array - super(DenseHealpix, self).__init__( - nside=hp.npix2nside(healpix_array.shape[0]), sparse=False - ) + super(DenseHealpix, self).__init__(nside=hp.npix2nside(healpix_array.shape[0]), sparse=False) def as_dense(self): """ @@ -159,3 +159,4 @@ def set_new_values(self, new_values): assert new_values.shape == self._dense_map.shape self._dense_map[:] = new_values + diff --git a/hawc_hal/interpolation/__init__.py b/hawc_hal/interpolation/__init__.py index d2b5c74..50ef670 100644 --- a/hawc_hal/interpolation/__init__.py +++ b/hawc_hal/interpolation/__init__.py @@ -1,3 +1,3 @@ from __future__ import absolute_import from .fast_bilinar_interpolation import FastBilinearInterpolation -from .irregular_grid import FastLinearInterpolatorIrregularGrid # pragma: no cover +from .irregular_grid import FastLinearInterpolatorIrregularGrid # pragma: no cover \ No newline at end of file diff --git a/hawc_hal/interpolation/fast_bilinar_interpolation.py b/hawc_hal/interpolation/fast_bilinar_interpolation.py index 55dbab9..c383a68 100644 --- a/hawc_hal/interpolation/fast_bilinar_interpolation.py +++ b/hawc_hal/interpolation/fast_bilinar_interpolation.py @@ -28,8 +28,8 @@ def __init__(self, input_shape, new_points): @staticmethod def _find_bounding_box(xaxis, yaxis, xs, ys): # Find lower left corner of bounding box - xidx = np.searchsorted(xaxis, xs, "left") - 1 - yidx = np.searchsorted(yaxis, ys, "left") - 1 + xidx = np.searchsorted(xaxis, xs, 'left') - 1 + yidx = np.searchsorted(yaxis, ys, 'left') - 1 lower_left_x = xaxis[xidx] lower_left_y = yaxis[yidx] @@ -43,16 +43,10 @@ def _find_bounding_box(xaxis, yaxis, xs, ys): lower_right_x = xaxis[xidx + 1] lower_right_y = yaxis[yidx] - return ( - lower_left_x, - lower_left_y, - upper_left_x, - upper_left_y, - upper_right_x, - upper_right_y, - lower_right_x, - lower_right_y, - ) + return (lower_left_x, lower_left_y, + upper_left_x, upper_left_y, + upper_right_x, upper_right_y, + lower_right_x, lower_right_y) def compute_coefficients(self, p): @@ -66,9 +60,7 @@ def compute_coefficients(self, p): # # y2 q12 q22 - x1, y2, xx1, y1, x2, yy1, xx2, yy2 = self._find_bounding_box( - self._gridx, self._gridy, xx, yy - ) + x1, y2, xx1, y1, x2, yy1, xx2, yy2 = self._find_bounding_box(self._gridx, self._gridy, xx, yy) bs = np.zeros((xx.shape[0], 4), np.float64) @@ -86,9 +78,10 @@ def compute_coefficients(self, p): # Stack them so that they are in the right order, i.e.: # ul1, ur1, ll1, lr1, ul2, ur2, ll2, lr2 ... 
uln, urn, lln, lrn - flat_points = np.vstack( - [flat_upper_left, flat_upper_right, flat_lower_left, flat_lower_right] - ).T.flatten() + flat_points = np.vstack([flat_upper_left, + flat_upper_right, + flat_lower_left, + flat_lower_right]).T.flatten() return bs, flat_points.astype(np.int64) @@ -104,4 +97,4 @@ def _apply_bilinar_interpolation(bs, points, data): # pragma: no cover vs = data.ravel()[points] - return np.sum(bs * vs.reshape(bs.shape), axis=1).flatten() + return np.sum(bs * vs.reshape(bs.shape), axis=1).flatten() \ No newline at end of file diff --git a/hawc_hal/interpolation/irregular_grid.py b/hawc_hal/interpolation/irregular_grid.py index 31042fc..583d955 100644 --- a/hawc_hal/interpolation/irregular_grid.py +++ b/hawc_hal/interpolation/irregular_grid.py @@ -17,17 +17,18 @@ def interp_weights(xy, uv, d=2): # pragma: no cover delta = uv - temp[:, d] - bary = np.einsum("njk,nk->nj", temp[:, :d, :], delta) + bary = np.einsum('njk,nk->nj', temp[:, :d, :], delta) return vertices, np.hstack((bary, 1 - bary.sum(axis=1, keepdims=True))) def interpolate(values, vtx, wts): # pragma: no cover - return np.einsum("nj,nj->n", np.take(values, vtx), wts) + return np.einsum('nj,nj->n', np.take(values, vtx), wts) class FastLinearInterpolatorIrregularGrid(object): # pragma: no cover + def __init__(self, data_shape, new_coords): old_coords = cartesian([np.arange(data_shape[0]), np.arange(data_shape[1])]) @@ -36,4 +37,4 @@ def __init__(self, data_shape, new_coords): def __call__(self, data): - return interpolate(data, self._vtx, self._wts) + return interpolate(data, self._vtx, self._wts) \ No newline at end of file diff --git a/hawc_hal/interpolation/log_log_interpolation.py b/hawc_hal/interpolation/log_log_interpolation.py index 0d01f2f..146fd25 100644 --- a/hawc_hal/interpolation/log_log_interpolation.py +++ b/hawc_hal/interpolation/log_log_interpolation.py @@ -6,6 +6,7 @@ class LogLogInterpolator(object): # pragma: no cover + def __init__(self, x, y, k=2): y = y.astype(np.float64) @@ -18,7 +19,7 @@ def __init__(self, x, y, k=2): def __call__(self, x): - return 10 ** self._interp(log10(x)) + return 10**self._interp(log10(x)) def integral(self, a, b, n_points=100, k=1): @@ -29,4 +30,4 @@ def integral(self, a, b, n_points=100, k=1): int_interp = scipy.interpolate.InterpolatedUnivariateSpline(xx, yy, k=k) - return int_interp.integral(a, b) + return int_interp.integral(a, b) \ No newline at end of file diff --git a/hawc_hal/log_likelihood.py b/hawc_hal/log_likelihood.py index 955020e..80b7ad3 100644 --- a/hawc_hal/log_likelihood.py +++ b/hawc_hal/log_likelihood.py @@ -4,17 +4,9 @@ # This function has two signatures in numba because if there are no sources in the likelihood model, # then expected_model_counts is 0.0 -@jit( - [ - "float64(float64[:], float64[:], float64[:])", - "float64(float64[:], float64[:], float64)", - ], - nopython=True, - parallel=False, -) -def log_likelihood( - observed_counts, expected_bkg_counts, expected_model_counts -): # pragma: no cover +@jit(["float64(float64[:], float64[:], float64[:])", "float64(float64[:], float64[:], float64)"], + nopython=True, parallel=False) +def log_likelihood(observed_counts, expected_bkg_counts, expected_model_counts): # pragma: no cover """ Poisson log-likelihood minus log factorial minus bias. 
The bias might be needed to keep the numerical value of the likelihood small enough so that
    there aren't numerical problems when computing differences between two

@@ -34,4 +26,4 @@ def log_likelihood(

    log_likes = observed_counts * np.log(predicted_counts) - predicted_counts

-    return np.sum(log_likes)
+    return np.sum(log_likes)
\ No newline at end of file
diff --git a/hawc_hal/maptree/data_analysis_bin.py b/hawc_hal/maptree/data_analysis_bin.py
index 56da6b4..d98717e 100644
--- a/hawc_hal/maptree/data_analysis_bin.py
+++ b/hawc_hal/maptree/data_analysis_bin.py
@@ -4,27 +4,16 @@


 class DataAnalysisBin(object):
-    def __init__(
-        self,
-        name,
-        observation_hpx_map,
-        background_hpx_map,
-        active_pixels_ids,
-        n_transits,
-        scheme="RING",
-    ):
+
+    def __init__(self, name, observation_hpx_map, background_hpx_map, active_pixels_ids, n_transits, scheme='RING'):

         # Get nside
         self._nside = observation_hpx_map.nside

         nside_bkg = background_hpx_map.nside

-        assert (
-            self._nside == nside_bkg
-        ), "Observation and background maps have " "different nside (%i vs %i)" % (
-            self._nside,
-            nside_bkg,
-        )
+        assert self._nside == nside_bkg, "Observation and background maps have " \
+                                         "different nside (%i vs %i)" % (self._nside, nside_bkg)

         self._npix = observation_hpx_map.npix

@@ -39,7 +28,7 @@ def __init__(
         # Store name and scheme
         self._name = str(name)

-        assert scheme in ["RING", "NEST"], "Scheme must be either RING or NEST"
+        assert scheme in ['RING', 'NEST'], "Scheme must be either RING or NEST"

         self._scheme = scheme

@@ -48,23 +37,17 @@ def __init__(
     def to_pandas(self):

         # Make a dataframe
-        df = pd.DataFrame.from_dict(
-            {
-                "observation": self._observation_hpx_map.to_pandas(),
-                "background": self._background_hpx_map.to_pandas(),
-            }
-        )
+        df = pd.DataFrame.from_dict({'observation': self._observation_hpx_map.to_pandas(),
+                                     'background': self._background_hpx_map.to_pandas()})

         if self._active_pixels_ids is not None:
             # We are saving only a subset
             df.set_index(self._active_pixels_ids, inplace=True)

         # Some metadata
-        meta = {
-            "scheme": 0 if self._scheme == "RING" else 1,
-            "n_transits": self._n_transits,
-            "nside": self._nside,
-        }
+        meta = {'scheme': 0 if self._scheme == 'RING' else 1,
+                'n_transits': self._n_transits,
+                'nside': self._nside}

         return df, meta

diff --git a/hawc_hal/maptree/map_tree.py b/hawc_hal/maptree/map_tree.py
index 421914a..071857f 100644
--- a/hawc_hal/maptree/map_tree.py
+++ b/hawc_hal/maptree/map_tree.py
@@ -125,12 +125,8 @@ def display(self):

         df = pd.DataFrame()
         df["Bin"] = list(self._analysis_bins.keys())
-        df["Nside"] = [
-            self._analysis_bins[bin_id].nside for bin_id in self._analysis_bins
-        ]
-        df["Scheme"] = [
-            self._analysis_bins[bin_id].scheme for bin_id in self._analysis_bins
-        ]
+        df["Nside"] = [self._analysis_bins[bin_id].nside for bin_id in self._analysis_bins]
+        df["Scheme"] = [self._analysis_bins[bin_id].scheme for bin_id in self._analysis_bins]

         # Compute observed counts, background counts, how many pixels we have in the ROI and
         # the sky area they cover
@@ -197,9 +193,9 @@ def write(self, filename):

             analysis_bin = self._analysis_bins[bin_id]

-            assert (
-                bin_id == analysis_bin.name
-            ), "Bin name inconsistency: {} != {}".format(bin_id, analysis_bin.name)
+            assert bin_id == analysis_bin.name, "Bin name inconsistency: {} != {}".format(
+                bin_id, analysis_bin.name
+            )

             multi_index_keys.append(analysis_bin.name)

@@ -227,9 +223,7 @@ def write(self, filename):

             else:

-                serializer.store_pandas_object(
-                    "/ROI", pd.Series(), **self._roi.to_dict()
-                )
+                
serializer.store_pandas_object("/ROI", pd.Series(), **self._roi.to_dict()) else: diff --git a/hawc_hal/obsolete/fast_convolution.py b/hawc_hal/obsolete/fast_convolution.py index e037a7f..282541c 100644 --- a/hawc_hal/obsolete/fast_convolution.py +++ b/hawc_hal/obsolete/fast_convolution.py @@ -19,10 +19,10 @@ def _next_regular(target): return target # Quickly check if it's already a power of 2 - if not (target & (target - 1)): + if not (target & (target-1)): return target - match = float("inf") # Anything found will be smaller + match = float('inf') # Anything found will be smaller p5 = 1 while p5 < target: p35 = p5 @@ -33,10 +33,10 @@ def _next_regular(target): # Quickly find next power of 2 >= quotient try: - p2 = 2 ** ((quotient - 1).bit_length()) + p2 = 2**((quotient - 1).bit_length()) except AttributeError: # Fallback for Python <2.7 - p2 = 2 ** (len(bin(quotient - 1)) - 2) + p2 = 2**(len(bin(quotient - 1)) - 2) N = p2 * p35 if N == target: @@ -67,11 +67,13 @@ def _centered(arr, newsize): class FastConvolution(object): + def __init__(self, ras, decs, nhpix, nwpix, psf_instance): # Make the PSF image psf_image = np.zeros((nhpix, nwpix)) + self._ras = ras self._decs = decs @@ -106,13 +108,9 @@ def setup(self, expected_shape): def __call__(self, image): - assert alltrue( - image.shape == self._expected_shape - ), "Shape of image to be convolved is not correct. Re-run setup." + assert alltrue(image.shape == self._expected_shape), "Shape of image to be convolved is not correct. Re-run setup." - ret = irfftn(rfftn(image, self._fshape) * self._psf_fft, self._fshape)[ - self._fslice - ].copy() + ret = irfftn(rfftn(image, self._fshape) * self._psf_fft, self._fshape)[self._fslice].copy() return _centered(ret, self._expected_shape) @@ -132,16 +130,12 @@ def brute_force_convolution(image, kernel, output): half_size_minus_one = (kernel_size - 1) // 2 - for i in range(half_size_minus_one, h - half_size_minus_one): + for i in range(half_size_minus_one, h-half_size_minus_one): - for j in range(half_size_minus_one, w - half_size_minus_one): + for j in range(half_size_minus_one, w-half_size_minus_one): - output[j, i] = np.sum( - image[ - j - half_size_minus_one : j + half_size_minus_one + 1, - i - half_size_minus_one : i + half_size_minus_one + 1, - ] - * kernel - ) + output[j, i] = np.sum(image[j - half_size_minus_one : j + half_size_minus_one + 1, + i - half_size_minus_one : i + half_size_minus_one + 1] + * kernel) - return output + return output \ No newline at end of file diff --git a/hawc_hal/obsolete/image_to_healpix.py b/hawc_hal/obsolete/image_to_healpix.py index e8e1782..0472f8f 100644 --- a/hawc_hal/obsolete/image_to_healpix.py +++ b/hawc_hal/obsolete/image_to_healpix.py @@ -1,8 +1,4 @@ -from hawc_hal.healpix_handling.flat_sky_to_healpix import ( - _parse_coord_system, - _convert_world_coordinates, - ORDER, -) +from hawc_hal.healpix_handling.flat_sky_to_healpix import _parse_coord_system, _convert_world_coordinates, ORDER from hawc_hal.special_values import UNSEEN import healpy as hp import numpy as np @@ -11,19 +7,9 @@ from astropy import units as u - -def image_to_healpix( - data, - wcs_in, - coord_system_out, - nside, - pixels_id, - order="bilinear", - nested=False, - fill_value=UNSEEN, - pixels_to_be_zeroed=None, - full=False, -): +def image_to_healpix(data, wcs_in, coord_system_out, + nside, pixels_id, order='bilinear', nested=False, + fill_value=UNSEEN, pixels_to_be_zeroed=None, full=False): npix = hp.nside2npix(nside) @@ -32,15 +18,13 @@ def image_to_healpix( theta, phi = 
hp.pix2ang(nside, pixels_id, nested)

     lon_out = np.degrees(phi)
-    lat_out = 90.0 - np.degrees(theta)
+    lat_out = 90. - np.degrees(theta)

     # Convert between celestial coordinates
     coord_system_out = _parse_coord_system(coord_system_out)

-    with np.errstate(invalid="ignore"):
-        lon_in, lat_in = _convert_world_coordinates(
-            lon_out, lat_out, (coord_system_out, u.deg, u.deg), wcs_in
-        )
+    with np.errstate(invalid='ignore'):
+        lon_in, lat_in = _convert_world_coordinates(lon_out, lat_out, (coord_system_out, u.deg, u.deg), wcs_in)

     # Look up pixels in input system
     yinds, xinds = wcs_in.wcs_world2pix(lon_in, lat_in, 0)
@@ -50,9 +34,9 @@ def image_to_healpix(
     if isinstance(order, six.string_types):
         order = ORDER[order]

-    healpix_data_ = map_coordinates(
-        data, [xinds, yinds], order=order, mode="constant", cval=fill_value
-    )
+    healpix_data_ = map_coordinates(data, [xinds, yinds],
+                                    order=order,
+                                    mode='constant', cval=fill_value)

     if not full:

@@ -69,10 +53,8 @@ def image_to_healpix(

     if pixels_to_be_zeroed is not None:

-        healpix_data[pixels_to_be_zeroed] = np.where(
-            np.isnan(healpix_data[pixels_to_be_zeroed]),
-            0.0,
-            healpix_data[pixels_to_be_zeroed],
-        )
+        healpix_data[pixels_to_be_zeroed] = np.where(np.isnan(healpix_data[pixels_to_be_zeroed]),
+                                                     0.0,
+                                                     healpix_data[pixels_to_be_zeroed])

     return healpix_data
diff --git a/hawc_hal/obsolete/tf1_wrapper.py b/hawc_hal/obsolete/tf1_wrapper.py
index 4700880..08cfaa4 100644
--- a/hawc_hal/obsolete/tf1_wrapper.py
+++ b/hawc_hal/obsolete/tf1_wrapper.py
@@ -1,7 +1,6 @@
 from builtins import object
-
-
 class TF1Wrapper(object):
+
     def __init__(self, tf1_instance):
         # Make a copy so that if the passed instance was a pointer from a TFile,
         # it will survive the closing of the associated TFile
@@ -16,4 +15,4 @@ def integral(self, *args, **kwargs):
         return self._tf1.Integral(*args, **kwargs)

     def __call__(self, *args, **kwargs):
-        return self._tf1.Eval(*args, **kwargs)
+        return self._tf1.Eval(*args, **kwargs)
\ No newline at end of file
diff --git a/hawc_hal/obsolete/ts_map.py b/hawc_hal/obsolete/ts_map.py
index 288b59c..6691633 100644
--- a/hawc_hal/obsolete/ts_map.py
+++ b/hawc_hal/obsolete/ts_map.py
@@ -6,7 +6,6 @@
 import numpy as np

 from threeML.io.logging import setup_logger
-
 log = setup_logger(__name__)
 log.propagate = False

@@ -19,10 +18,7 @@

 import matplotlib.pyplot as plt

-from threeML.parallel.parallel_client import (
-    ParallelClient,
-    is_parallel_computation_active,
-)
+from threeML.parallel.parallel_client import ParallelClient, is_parallel_computation_active
 from tqdm.auto import tqdm

 from threeML.io.fits_file import FITSFile
@@ -30,228 +26,203 @@


 class ParallelTSmap(object):
-    def __init__(
-        self,
-        maptree,
-        response,
-        ra_c,
-        dec_c,
-        xsize,
-        ysize,
-        pix_scale,
-        projection="AIT",
-        roi_radius=3.0,
-    ):
+
+    def __init__(self, maptree, response, ra_c, dec_c, xsize, ysize, pix_scale, projection = 'AIT', roi_radius=3.0):

         # Create a new WCS object so that we can compute the appropriate R.A. 
and Dec # where we need to compute the TS self._wcs = wcs.WCS(naxis=2) - + # The +1 is because the CRPIX should be in FORTRAN indexing, which starts at +1 self._wcs.wcs.crpix = [xsize / 2.0, ysize / 2.0] self._wcs.wcs.cdelt = [-pix_scale, pix_scale] self._wcs.wcs.crval = [ra_c, dec_c] self._wcs.wcs.ctype = ["RA---%s" % projection, "DEC--%s" % projection] - + self._ra_c = ra_c self._dec_c = dec_c - + self._mtfile = maptree self._rsfile = response - + self._points = [] - + # It is important that dec is the first one because the PSF for a Dec bin_name # is cached within one engine - + max_d = 0 - + for idec in range(ysize): - + for ira in range(xsize): - - this_ra, this_dec = self._wcs.wcs_pix2world(ira, idec, 0) - - self._points.append((this_ra, this_dec)) - - d = angular_separation(*np.deg2rad((this_ra, this_dec, ra_c, dec_c))) - - if d > max_d: - - max_d = d - + + this_ra, this_dec = self._wcs.wcs_pix2world(ira, idec, 0) + + self._points.append((this_ra, this_dec)) + + d = angular_separation(*np.deg2rad((this_ra, this_dec, ra_c, dec_c))) + + if d > max_d: + + max_d = d + log.info("Maximum distance from center: %.3f deg" % np.rad2deg(max_d)) - + # We keep track of how many ras we have so that when running in parallel all # the ras will run on the same engine with the same dec, maximizing the use # of the cache and minimizing the memory footprint - + self._n_ras = xsize self._n_decs = ysize - + self._roi_radius = float(roi_radius) roi = HealpixConeROI(self._roi_radius, ra=ra_c, dec=dec_c) - + self._llh = HAL("HAWC", self._mtfile, self._rsfile, roi) - self._llh.set_active_measurements(1, 9) - + self._llh.set_active_measurements(1,9) + # Make a fit with no source to get the likelihood for the null hypothesis model = self.get_model(0) - model.TestSource.spectrum.main.shape.K = ( - model.TestSource.spectrum.main.shape.K.min_value - ) - + model.TestSource.spectrum.main.shape.K = model.TestSource.spectrum.main.shape.K.min_value + self._llh.set_model(model) - + self._like0 = self._llh.get_log_like() - + # We will fill this with the maximum of the TS map - + self._max_ts = None - + def get_data(self, interval_id): - + datalist = DataList(self._llh) - + return datalist - + def get_model(self, interval_id): - + spectrum = Powerlaw() - + this_ra = self._points[interval_id][0] this_dec = self._points[interval_id][1] - - this_source = PointSource( - "TestSource", ra=this_ra, dec=this_dec, spectral_shape=spectrum - ) - + + this_source = PointSource("TestSource", ra=this_ra, dec=this_dec, spectral_shape=spectrum) + spectrum.piv = 1 * u.TeV spectrum.piv.fix = True - + # Start from a flux 1/10 of the Crab spectrum.K = crab_diff_flux_at_1_TeV spectrum.K.bounds = (1e-30, 1e-18) spectrum.index = -2.63 spectrum.index.fix = False - + model1 = Model(this_source) - + return model1 - + def worker(self, interval_id): - + model = self.get_model(interval_id) data = self.get_data(interval_id) - + jl = JointLikelihood(model, data) jl.set_minimizer("ROOT") par, like = jl.fit(quiet=True) + + return like['-log(likelihood)']['HAWC'] - return like["-log(likelihood)"]["HAWC"] - + def go(self): - + if is_parallel_computation_active(): - + client = ParallelClient() - + if self._n_decs % client.get_number_of_engines() != 0: - - log.warning( - "The number of Dec bands is not a multiple of the number of engine. 
Make it so for optimal performances.", - RuntimeWarning, - ) - - res = client.execute_with_progress_bar( - self.worker, list(range(len(self._points))), chunk_size=self._n_ras - ) - + + log.warning("The number of Dec bands is not a multiple of the number of engine. Make it so for optimal performances.", RuntimeWarning) + + res = client.execute_with_progress_bar(self.worker, list(range(len(self._points))), chunk_size=self._n_ras) + else: - + n_points = len(self._points) - + p = tqdm(total=n_points) - + res = np.zeros(n_points) - + for i, point in enumerate(self._points): res[i] = self.worker(i) p.update(1) - + TS = 2 * (-np.array(res) - self._like0) - - # self._debug_map = {k:v for v,k in zip(self._points, TS)} - + + #self._debug_map = {k:v for v,k in zip(self._points, TS)} + # Get maximum of TS idx = TS.argmax() self._max_ts = (TS[idx], self._points[idx]) - - log.info( - "Maximum TS is %.2f at (R.A., Dec) = (%.3f, %.3f)" - % (self._max_ts[0], self._max_ts[1][0], self._max_ts[1][1]) - ) - + + log.info("Maximum TS is %.2f at (R.A., Dec) = (%.3f, %.3f)" % (self._max_ts[0], self._max_ts[1][0], self._max_ts[1][1])) + self._ts_map = TS.reshape(self._n_decs, self._n_ras) - + return self._ts_map - + @property def maximum_of_map(self): - + return self._max_ts - + def to_fits(self, filename, overwrite=True): - + primary_hdu = pyfits.PrimaryHDU(data=self._ts_map, header=self._wcs.to_header()) - + primary_hdu.writeto(filename, overwrite=overwrite) - + def plot(self): - + # Draw TS map + + fig, ax = plt.subplots(subplot_kw = {'projection': self._wcs}) - fig, ax = plt.subplots(subplot_kw={"projection": self._wcs}) - - plt.imshow(self._ts_map, origin="lower", interpolation="none") - + plt.imshow(self._ts_map, origin='lower', interpolation='none') + # Draw colorbar - + cbar = plt.colorbar(format="%.1f") - + cbar.set_label("TS") - + # Plot 1, 2 and 3 sigma contour - _ = plt.contour( - self._ts_map, - origin="lower", - levels=self._ts_map.max() - np.array([4.605, 7.378, 9.210][::-1]), - colors=["black", "blue", "red"], - ) - + _ = plt.contour(self._ts_map, origin='lower', + levels=self._ts_map.max() - np.array([4.605, 7.378, 9.210][::-1]), + colors=['black', 'blue','red']) + # Access the first WCS coordinate ra = ax.coords[0] dec = ax.coords[1] - + # Set the format of the tick labels - ra.set_major_formatter("d.ddd") - dec.set_major_formatter("d.ddd") - - plt.xlabel("R.A. (J2000)") - plt.ylabel("Dec. (J2000)") - + ra.set_major_formatter('d.ddd') + dec.set_major_formatter('d.ddd') + + plt.xlabel('R.A. (J2000)') + plt.ylabel('Dec. 
(J2000)') + # Overlay the center - - ax.scatter( - [self._ra_c], [self._dec_c], transform=ax.get_transform("world"), marker="o" - ) - + + ax.scatter([self._ra_c],[self._dec_c], transform=ax.get_transform('world'), marker='o') + # Overlay the maximum TS ra_max, dec_max = self._max_ts[1] - - ax.scatter([ra_max], [dec_max], transform=ax.get_transform("world"), marker="x") - + + ax.scatter([ra_max],[dec_max], transform=ax.get_transform('world'), marker='x') + return fig + diff --git a/hawc_hal/psf_fast/__init__.py b/hawc_hal/psf_fast/__init__.py index bcf4043..60f1343 100644 --- a/hawc_hal/psf_fast/__init__.py +++ b/hawc_hal/psf_fast/__init__.py @@ -1,4 +1,4 @@ from __future__ import absolute_import from .psf_wrapper import PSFWrapper, InvalidPSF, InvalidPSFError from .psf_interpolator import PSFInterpolator -from .psf_convolutor import PSFConvolutor +from .psf_convolutor import PSFConvolutor \ No newline at end of file diff --git a/hawc_hal/psf_fast/psf_convolutor.py b/hawc_hal/psf_fast/psf_convolutor.py index c759c62..39f9eb2 100644 --- a/hawc_hal/psf_fast/psf_convolutor.py +++ b/hawc_hal/psf_fast/psf_convolutor.py @@ -5,8 +5,7 @@ from past.utils import old_div import numpy as np from numpy.fft import rfftn, irfftn - -# from scipy.signal import fftconvolve +#from scipy.signal import fftconvolve from scipy.fftpack import helper @@ -15,6 +14,7 @@ class PSFConvolutor(object): + def __init__(self, psf_wrapper, flat_sky_proj): self._psf = psf_wrapper # type: PSFWrapper @@ -22,30 +22,22 @@ def __init__(self, psf_wrapper, flat_sky_proj): # Compute an image of the PSF on the current defined flat sky projection interpolator = PSFInterpolator(psf_wrapper, flat_sky_proj) - psf_stamp = interpolator.point_source_image( - flat_sky_proj.ra_center, flat_sky_proj.dec_center - ) + psf_stamp = interpolator.point_source_image(flat_sky_proj.ra_center, flat_sky_proj.dec_center) # Crop the kernel at the appropriate radius for this PSF (making sure is divisible by 2) - kernel_radius_px = int( - np.ceil(old_div(self._psf.kernel_radius, flat_sky_proj.pixel_size)) - ) + kernel_radius_px = int(np.ceil(old_div(self._psf.kernel_radius, flat_sky_proj.pixel_size))) pixels_to_keep = kernel_radius_px * 2 - assert ( - pixels_to_keep <= psf_stamp.shape[0] - and pixels_to_keep <= psf_stamp.shape[1] - ), "The kernel is too large with respect to the model image. Enlarge your model radius." + assert pixels_to_keep <= psf_stamp.shape[0] and \ + pixels_to_keep <= psf_stamp.shape[1], \ + "The kernel is too large with respect to the model image. Enlarge your model radius." xoff = (psf_stamp.shape[0] - pixels_to_keep) // 2 yoff = (psf_stamp.shape[1] - pixels_to_keep) // 2 self._kernel = psf_stamp[yoff:-yoff, xoff:-xoff] - assert np.isclose(self._kernel.sum(), 1.0, rtol=1e-2), ( - "Failed to generate proper kernel normalization: got _kernel.sum() = %f; expected 1.0+-0.01." - % self._kernel.sum() - ) + assert np.isclose(self._kernel.sum(), 1.0, rtol=1e-2), "Failed to generate proper kernel normalization: got _kernel.sum() = %f; expected 1.0+-0.01." % self._kernel.sum() # Renormalize to exactly 1 self._kernel = old_div(self._kernel, self._kernel.sum()) @@ -76,13 +68,9 @@ def extended_source_image(self, ideal_image): # Convolve - assert np.alltrue( - ideal_image.shape == self._expected_shape - ), "Shape of image to be convolved is not correct." + assert np.alltrue(ideal_image.shape == self._expected_shape), "Shape of image to be convolved is not correct." 
- ret = irfftn(rfftn(ideal_image, self._fshape) * self._psf_fft, self._fshape)[ - self._fslice - ].copy() + ret = irfftn(rfftn(ideal_image, self._fshape) * self._psf_fft, self._fshape)[self._fslice].copy() conv = _centered(ret, self._expected_shape) diff --git a/hawc_hal/psf_fast/psf_interpolator.py b/hawc_hal/psf_fast/psf_interpolator.py index f1a6e4c..7055637 100644 --- a/hawc_hal/psf_fast/psf_interpolator.py +++ b/hawc_hal/psf_fast/psf_interpolator.py @@ -17,14 +17,13 @@ def _divide_in_blocks(arr, nrows, ncols): each subblock preserving the "physical" layout of arr. """ h, w = arr.shape - return ( - arr.reshape(h // nrows, nrows, -1, ncols) - .swapaxes(1, 2) - .reshape(-1, nrows, ncols) - ) + return (arr.reshape(h // nrows, nrows, -1, ncols) + .swapaxes(1, 2) + .reshape(-1, nrows, ncols)) class PSFInterpolator(object): + def __init__(self, psf_wrapper, flat_sky_proj): self._psf = psf_wrapper @@ -40,41 +39,32 @@ def _get_point_source_image_aitoff(self, ra, dec, psf_integration_method): # First we obtain an image with a flat sky projection centered exactly on the point source ancillary_image_pixel_size = 0.05 - pixel_side = 2 * int( - np.ceil(old_div(self._psf.truncation_radius, ancillary_image_pixel_size)) - ) - ancillary_flat_sky_proj = flat_sky_projection.FlatSkyProjection( - ra, dec, ancillary_image_pixel_size, pixel_side, pixel_side - ) + pixel_side = 2 * int(np.ceil(old_div(self._psf.truncation_radius, ancillary_image_pixel_size))) + ancillary_flat_sky_proj = flat_sky_projection.FlatSkyProjection(ra, dec, ancillary_image_pixel_size, + pixel_side, pixel_side) # Now we compute the angular distance of all pixels in the ancillary image with respect to the center - angular_distances = sphere_dist( - ra, dec, ancillary_flat_sky_proj.ras, ancillary_flat_sky_proj.decs - ) + angular_distances = sphere_dist(ra, dec, ancillary_flat_sky_proj.ras, ancillary_flat_sky_proj.decs) # Compute the brightness (i.e., the differential PSF) - ancillary_brightness = ( - self._psf.brightness(angular_distances).reshape((pixel_side, pixel_side)) - * self._flat_sky_p.project_plane_pixel_area - ) + ancillary_brightness = self._psf.brightness(angular_distances).reshape((pixel_side, pixel_side)) * \ + self._flat_sky_p.project_plane_pixel_area # Now reproject this brightness on the new image - if psf_integration_method == "exact": + if psf_integration_method == 'exact': reprojection_method = reproject.reproject_exact - additional_keywords = {"parallel": False} + additional_keywords = {'parallel': False} else: reprojection_method = reproject.reproject_interp additional_keywords = {} - brightness, _ = reprojection_method( - (ancillary_brightness, ancillary_flat_sky_proj.wcs), - self._flat_sky_p.wcs, - shape_out=(self._flat_sky_p.npix_height, self._flat_sky_p.npix_width), - **additional_keywords - ) + brightness, _ = reprojection_method((ancillary_brightness, ancillary_flat_sky_proj.wcs), + self._flat_sky_p.wcs, shape_out=(self._flat_sky_p.npix_height, + self._flat_sky_p.npix_width), + **additional_keywords) brightness[np.isnan(brightness)] = 0.0 @@ -130,7 +120,7 @@ def _get_point_source_image_aitoff(self, ra, dec, psf_integration_method): return point_source_img_ait - def point_source_image(self, ra_src, dec_src, psf_integration_method="exact"): + def point_source_image(self, ra_src, dec_src, psf_integration_method='exact'): # Make a unique key for this request key = (ra_src, dec_src, psf_integration_method) @@ -142,9 +132,7 @@ def point_source_image(self, ra_src, dec_src, psf_integration_method="exact"): else: - 
point_source_img_ait = self._get_point_source_image_aitoff( - ra_src, dec_src, psf_integration_method - ) + point_source_img_ait = self._get_point_source_image_aitoff(ra_src, dec_src, psf_integration_method) # Store for future use diff --git a/hawc_hal/psf_fast/psf_wrapper.py b/hawc_hal/psf_fast/psf_wrapper.py index 762bccc..1279e5d 100644 --- a/hawc_hal/psf_fast/psf_wrapper.py +++ b/hawc_hal/psf_fast/psf_wrapper.py @@ -30,9 +30,7 @@ def __init__(self, xs, ys, brightness_interp_x=None, brightness_interp_y=None): # Memorize the total integral (will use it for normalization) - self._total_integral = self._psf_interpolated.integral( - self._xs[0], _INTEGRAL_OUTER_RADIUS - ) + self._total_integral = self._psf_interpolated.integral(self._xs[0], _INTEGRAL_OUTER_RADIUS) # Now compute the truncation radius, which is a very conservative measurement # of the size of the PSF @@ -92,9 +90,7 @@ def find_eef_radius(self, fraction): f = lambda r: fraction - old_div(self.integral(1e-4, r), self._total_integral) - radius, status = scipy.optimize.brentq( - f, 0.005, _INTEGRAL_OUTER_RADIUS, full_output=True - ) + radius, status = scipy.optimize.brentq(f, 0.005, _INTEGRAL_OUTER_RADIUS, full_output=True) assert status.converged, "Brentq did not converged" @@ -135,9 +131,7 @@ def combine_with_other_psf(self, other_psf, w1, w2): new_ys = w1 * self.ys + w2 * other_psf.ys # Also weight the brightness interpolation points - new_br_interp_y = ( - w1 * self._brightness_interp_y + w2 * other_psf._brightness_interp_y - ) + new_br_interp_y = w1 * self._brightness_interp_y + w2 * other_psf._brightness_interp_y return PSFWrapper( self.xs, diff --git a/hawc_hal/region_of_interest/__init__.py b/hawc_hal/region_of_interest/__init__.py index 3a779cb..2bb7871 100644 --- a/hawc_hal/region_of_interest/__init__.py +++ b/hawc_hal/region_of_interest/__init__.py @@ -10,6 +10,6 @@ def get_roi_from_dict(dictionary): :return: """ - roi_type = dictionary["ROI type"] + roi_type = dictionary['ROI type'] return globals()[roi_type].from_dict(dictionary) diff --git a/hawc_hal/region_of_interest/healpix_cone_roi.py b/hawc_hal/region_of_interest/healpix_cone_roi.py index 196f89e..a60d0e1 100644 --- a/hawc_hal/region_of_interest/healpix_cone_roi.py +++ b/hawc_hal/region_of_interest/healpix_cone_roi.py @@ -12,11 +12,9 @@ from ..flat_sky_projection import FlatSkyProjection from threeML.io.logging import setup_logger - log = setup_logger(__name__) log.propagate = False - def _get_radians(my_angle): if isinstance(my_angle, u.Quantity): @@ -31,6 +29,7 @@ def _get_radians(my_angle): class HealpixConeROI(HealpixROIBase): + def __init__(self, data_radius, model_radius, *args, **kwargs): """ A cone Region of Interest defined by a center and a radius. 
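For context, a minimal sketch of how this cone ROI is constructed and turned into its flat-sky projection, mirroring the usage in the tests later in this patch (the import path, coordinates, radii, and the 0.17 deg pixel size are illustrative assumptions, not part of this change):

    from hawc_hal.region_of_interest import HealpixConeROI

    # Illustrative values: a 5 deg data cone inside an 8 deg model cone
    roi = HealpixConeROI(data_radius=5.0, model_radius=8.0, ra=83.63, dec=22.02)

    # The flat-sky side count is driven by the model radius: with 0.17 deg
    # pixels, npix_per_side = 2 * ceil(8.0 / 0.17) = 96, rounded up so that
    # the image side is always even
    flat_sky_proj = roi.get_flat_sky_projection(pixel_size_deg=0.17)
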
@@ -62,38 +61,24 @@ def to_dict(self): ra, dec = self.ra_dec_center - s = { - "ROI type": type(self).__name__.split(".")[-1], - "ra": ra, - "dec": dec, - "data_radius_deg": np.rad2deg(self._data_radius_radians), - "model_radius_deg": np.rad2deg(self._model_radius_radians), - } + s = {'ROI type': type(self).__name__.split(".")[-1], + 'ra': ra, + 'dec': dec, + 'data_radius_deg': np.rad2deg(self._data_radius_radians), + 'model_radius_deg': np.rad2deg(self._model_radius_radians)} return s @classmethod def from_dict(cls, data): - return cls( - data["data_radius_deg"], - data["model_radius_deg"], - ra=data["ra"], - dec=data["dec"], - ) + return cls(data['data_radius_deg'], data['model_radius_deg'], ra=data['ra'], dec=data['dec']) def __str__(self): - s = ( - "%s: Center (R.A., Dec) = (%.3f, %.3f), data radius = %.3f deg, model radius: %.3f deg" - % ( - type(self).__name__, - self.ra_dec_center[0], - self.ra_dec_center[1], - self.data_radius.to(u.deg).value, - self.model_radius.to(u.deg).value, - ) - ) + s = ("%s: Center (R.A., Dec) = (%.3f, %.3f), data radius = %.3f deg, model radius: %.3f deg" % + (type(self).__name__, self.ra_dec_center[0], self.ra_dec_center[1], + self.data_radius.to(u.deg).value, self.model_radius.to(u.deg).value)) return s @@ -135,9 +120,7 @@ def _active_pixels(self, nside, ordering): nest = ordering is _NESTED - pixels_inside_cone = hp.query_disc( - nside, vec, self._data_radius_radians, inclusive=False, nest=nest - ) + pixels_inside_cone = hp.query_disc(nside, vec, self._data_radius_radians, inclusive=False, nest=nest) return pixels_inside_cone @@ -146,16 +129,13 @@ def get_flat_sky_projection(self, pixel_size_deg): # Decide side for image # Compute number of pixels, making sure it is going to be even (by approximating up) - npix_per_side = 2 * int( - np.ceil(old_div(np.rad2deg(self._model_radius_radians), pixel_size_deg)) - ) + npix_per_side = 2 * int(np.ceil(old_div(np.rad2deg(self._model_radius_radians), pixel_size_deg))) # Get lon, lat of center ra, dec = self._get_ra_dec() # This gets a list of all RA, Decs for an AIT-projected image of npix_per_size x npix_per_side - flat_sky_proj = FlatSkyProjection( - ra, dec, pixel_size_deg, npix_per_side, npix_per_side - ) + flat_sky_proj = FlatSkyProjection(ra, dec, pixel_size_deg, npix_per_side, npix_per_side) return flat_sky_proj + diff --git a/hawc_hal/region_of_interest/healpix_map_roi.py b/hawc_hal/region_of_interest/healpix_map_roi.py index fcdf2c1..d579683 100644 --- a/hawc_hal/region_of_interest/healpix_map_roi.py +++ b/hawc_hal/region_of_interest/healpix_map_roi.py @@ -5,7 +5,6 @@ import healpy as hp from threeML.io.logging import setup_logger - log = setup_logger(__name__) log.propagate = False from astromodels.core.sky_direction import SkyDirection @@ -16,16 +15,8 @@ class HealpixMapROI(HealpixROIBase): - def __init__( - self, - data_radius, - model_radius, - roimap=None, - roifile=None, - threshold=0.5, - *args, - **kwargs - ): + + def __init__(self, data_radius, model_radius, roimap=None, roifile=None, threshold=0.5, *args, **kwargs): """ A cone Region of Interest defined by a healpix map (can be read from a fits file). User needs to supply a cone region (center and radius) defining the plane projection for the model map. 
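Likewise, a minimal sketch of building this map-based ROI from an in-memory HEALPix mask; it mirrors test_healpixRoi.py further down in this patch, and the NSIDE, coordinates, and radii are the illustrative values used there:

    import healpy as hp
    import numpy as np

    from hawc_hal.region_of_interest import HealpixMapROI

    NSIDE = 512
    ra, dec = 101.7, 16.0

    # Mask: 1 inside a 9 deg disc around (ra, dec), 0 elsewhere
    m = np.zeros(hp.nside2npix(NSIDE))
    vec = hp.ang2vec(np.radians(90.0 - dec), np.radians(ra))
    m[hp.query_disc(NSIDE, vec, np.radians(9.0))] = 1.0

    # The cone parameters only define the flat-sky model projection; the data
    # selection comes from the map itself, cut at the given threshold
    roi = HealpixMapROI(data_radius=9.0, model_radius=24.0, ra=ra, dec=dec,
                        roimap=m, threshold=0.5)

The separate cone is needed because the flat-sky model projection must be a regular rectangular image even when the data selection is an arbitrary mask.
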
@@ -52,10 +43,8 @@ def __init__( :param args: arguments for the SkyDirection class of astromodels :param kwargs: keywords for the SkyDirection class of astromodels """ - - assert ( - roifile is not None or roimap is not None - ), "Must supply either healpix map or fitsfile to create HealpixMapROI" + + assert roifile is not None or roimap is not None, "Must supply either healpix map or fitsfile to create HealpixMapROI" self._center = SkyDirection(*args, **kwargs) @@ -67,90 +56,71 @@ def __init__( self.read_map(roimap=roimap, roifile=roifile) - def read_map(self, roimap=None, roifile=None): - assert ( - roifile is not None or roimap is not None - ), "Must supply either healpix map or fits file" + def read_map(self, roimap=None, roifile=None): + assert roifile is not None or roimap is not None, \ + "Must supply either healpix map or fits file" + if roimap is not None: roimap = roimap self._filename = None elif roifile is not None: self._filename = roifile - roimap = hp.fitsfunc.read_map(self._filename) + roimap = hp.fitsfunc.read_map(self._filename) self._roimaps = {} self._original_nside = hp.npix2nside(roimap.shape[0]) self._roimaps[self._original_nside] = roimap - + self.check_roi_inside_model() + def check_roi_inside_model(self): active_pixels = self.active_pixels(self._original_nside) radius = np.rad2deg(self._model_radius_radians) ra, dec = self.ra_dec_center - temp_roi = HealpixConeROI( - data_radius=radius, model_radius=radius, ra=ra, dec=dec - ) + temp_roi = HealpixConeROI(data_radius = radius , model_radius=radius, ra=ra, dec=dec) - model_pixels = temp_roi.active_pixels(self._original_nside) + model_pixels = temp_roi.active_pixels( self._original_nside ) if not all(p in model_pixels for p in active_pixels): - log.warning( - "Some pixels inside your ROI are not contained in the model map." 
- ) + log.warning("Some pixels inside your ROI are not contained in the model map.") def to_dict(self): ra, dec = self.ra_dec_center - s = { - "ROI type": type(self).__name__.split(".")[-1], - "ra": ra, - "dec": dec, - "model_radius_deg": np.rad2deg(self._model_radius_radians), - "data_radius_deg": np.rad2deg(self._data_radius_radians), - "roimap": self._roimaps[self._original_nside], - "threshold": self._threshold, - "roifile": self._filename, - } + s = {'ROI type': type(self).__name__.split(".")[-1], + 'ra': ra, + 'dec': dec, + 'model_radius_deg': np.rad2deg(self._model_radius_radians), + 'data_radius_deg': np.rad2deg(self._data_radius_radians), + 'roimap': self._roimaps[self._original_nside], + 'threshold': self._threshold, + 'roifile': self._filename } return s @classmethod def from_dict(cls, data): - - return cls( - data["data_radius_deg"], - data["model_radius_deg"], - threshold=data["threshold"], - roimap=data["roimap"], - ra=data["ra"], - dec=data["dec"], - roifile=data["roifile"], - ) + + return cls(data['data_radius_deg'], data['model_radius_deg'], threshold=data['threshold'], + roimap=data['roimap'], ra=data['ra'], + dec=data['dec'], roifile=data['roifile']) def __str__(self): - s = ( - "%s: Center (R.A., Dec) = (%.3f, %.3f), model radius: %.3f deg, display radius: %.3f deg, threshold = %.2f" - % ( - type(self).__name__, - self.ra_dec_center[0], - self.ra_dec_center[1], - self.model_radius.to(u.deg).value, - self.data_radius.to(u.deg).value, - self._threshold, - ) - ) - - if self._filename is not None: - s = "%s, data ROI from %s" % (s, self._filename) + s = ("%s: Center (R.A., Dec) = (%.3f, %.3f), model radius: %.3f deg, display radius: %.3f deg, threshold = %.2f" % + (type(self).__name__, self.ra_dec_center[0], self.ra_dec_center[1], + self.model_radius.to(u.deg).value, self.data_radius.to(u.deg).value, self._threshold)) + if self._filename is not None: + s = "%s, data ROI from %s" % (s, self._filename) + return s def display(self): @@ -183,9 +153,7 @@ def _get_ra_dec(self): def _active_pixels(self, nside, ordering): if not nside in self._roimaps: - self._roimaps[nside] = hp.ud_grade( - self._roimaps[self._original_nside], nside_out=nside - ) + self._roimaps[nside] = hp.ud_grade(self._roimaps[self._original_nside], nside_out=nside) pixels_inside_roi = np.where(self._roimaps[nside] >= self._threshold)[0] @@ -196,16 +164,13 @@ def get_flat_sky_projection(self, pixel_size_deg): # Decide side for image # Compute number of pixels, making sure it is going to be even (by approximating up) - npix_per_side = 2 * int( - np.ceil(old_div(np.rad2deg(self._model_radius_radians), pixel_size_deg)) - ) + npix_per_side = 2 * int(np.ceil(old_div(np.rad2deg(self._model_radius_radians), pixel_size_deg))) # Get lon, lat of center ra, dec = self._get_ra_dec() # This gets a list of all RA, Decs for an AIT-projected image of npix_per_size x npix_per_side - flat_sky_proj = FlatSkyProjection( - ra, dec, pixel_size_deg, npix_per_side, npix_per_side - ) + flat_sky_proj = FlatSkyProjection(ra, dec, pixel_size_deg, npix_per_side, npix_per_side) return flat_sky_proj + diff --git a/hawc_hal/region_of_interest/healpix_roi_base.py b/hawc_hal/region_of_interest/healpix_roi_base.py index cf6321a..71ab65c 100644 --- a/hawc_hal/region_of_interest/healpix_roi_base.py +++ b/hawc_hal/region_of_interest/healpix_roi_base.py @@ -1,13 +1,13 @@ from builtins import object +_EQUATORIAL = 'equatorial' +_GALACTIC = 'galactic' -_EQUATORIAL = "equatorial" -_GALACTIC = "galactic" - -_RING = "RING" -_NESTED = "NESTED" +_RING = 
'RING' +_NESTED = 'NESTED' class HealpixROIBase(object): + def active_pixels(self, nside, system=_EQUATORIAL, ordering=_RING): """ Returns the non-zero elements, i.e., the pixels selected according to this Region Of Interest @@ -23,14 +23,9 @@ def active_pixels(self, nside, system=_EQUATORIAL, ordering=_RING): assert system == _EQUATORIAL, "%s reference system not supported" % system - assert ordering in [ - _RING, - _NESTED, - ], "Could not understand ordering %s. Must be %s or %s" % ( - ordering, - _RING, - _NESTED, - ) + assert ordering in [_RING, _NESTED], "Could not understand ordering %s. Must be %s or %s" % (ordering, + _RING, + _NESTED) return self._active_pixels(nside, ordering) diff --git a/hawc_hal/response/__init__.py b/hawc_hal/response/__init__.py index 698350e..cb4a892 100644 --- a/hawc_hal/response/__init__.py +++ b/hawc_hal/response/__init__.py @@ -1,2 +1,2 @@ from __future__ import absolute_import -from .response import hawc_response_factory +from .response import hawc_response_factory \ No newline at end of file diff --git a/hawc_hal/response/response_bin.py b/hawc_hal/response/response_bin.py index 057e7da..06297f6 100644 --- a/hawc_hal/response/response_bin.py +++ b/hawc_hal/response/response_bin.py @@ -112,20 +112,15 @@ def log_log_spectrum(log_energy: float, parameters: np.ndarray): return ( np.log10(parameters[0]) - parameters[1] * log_energy - - np.log10(np.exp(1.0)) - * np.power(10.0, log_energy - np.log10(parameters[2])) + - np.log10(np.exp(1.0)) * np.power(10.0, log_energy - np.log10(parameters[2])) ) else: raise ValueError("Unknown spectral shape.") - this_en_sig_th1d = cls._get_en_th1d( - root_file, dec_id, analysis_bin_id, prefix="Sig" - ) + this_en_sig_th1d = cls._get_en_th1d(root_file, dec_id, analysis_bin_id, prefix="Sig") - this_en_bg_th1d = cls._get_en_th1d( - root_file, dec_id, analysis_bin_id, prefix="Bg" - ) + this_en_bg_th1d = cls._get_en_th1d(root_file, dec_id, analysis_bin_id, prefix="Bg") total_bins = this_en_sig_th1d.shape[0] sim_energy_bin_low = np.zeros(total_bins) @@ -168,15 +163,11 @@ def log_log_spectrum(log_energy: float, parameters: np.ndarray): try: psf_prefix = f"dec_{dec_id:02d}/nh_{analysis_bin_id}" - psf_tf1_metadata = root_file[ - f"{psf_prefix}/PSF_dec{dec_id}_nh{analysis_bin_id}_fit" - ] + psf_tf1_metadata = root_file[f"{psf_prefix}/PSF_dec{dec_id}_nh{analysis_bin_id}_fit"] except uproot.KeyInFileError: psf_prefix = f"dec_{dec_id:02d}/nh_0{analysis_bin_id}" - psf_tf1_metadata = root_file[ - f"{psf_prefix}/PSF_dec{dec_id}_nh{analysis_bin_id}_fit" - ] + psf_tf1_metadata = root_file[f"{psf_prefix}/PSF_dec{dec_id}_nh{analysis_bin_id}_fit"] psf_tf1_fparams = psf_tf1_metadata.member("fParams") @@ -250,9 +241,7 @@ def combine_with_weights(self, other_response_bin, dec_center, w1, w2): n_sim_signal_events = ( w1 * self._sim_n_sig_events + w2 * other_response_bin._sim_n_sig_events ) - n_sim_bkg_events = ( - w1 * self._sim_n_bg_events + w2 * other_response_bin._sim_n_bg_events - ) + n_sim_bkg_events = w1 * self._sim_n_bg_events + w2 * other_response_bin._sim_n_bg_events # We assume that the bin centers are the same assert np.allclose( diff --git a/hawc_hal/serialize.py b/hawc_hal/serialize.py index f85cb58..fa1b196 100644 --- a/hawc_hal/serialize.py +++ b/hawc_hal/serialize.py @@ -5,7 +5,8 @@ # This object is to decouple the serialization from any particular implementation. At the moment we use HDF5 through # pandas but this might change in the future. Without changing the external API, only changes here will be necessary. 
class Serialization(object): - def __init__(self, filename, mode="r", compress=True): + + def __init__(self, filename, mode='r', compress=True): self._filename = filename self._compress = compress @@ -15,9 +16,7 @@ def __enter__(self): if self._compress: - self._store = HDFStore( - self._filename, complib="blosc:lz4", complevel=9, mode=self._mode - ) + self._store = HDFStore(self._filename, complib='blosc:lz4', complevel=9, mode=self._mode) else: # pragma: no cover @@ -36,7 +35,7 @@ def keys(self): def store_pandas_object(self, path, obj, **metadata): - self._store.put(path, obj, format="fixed") + self._store.put(path, obj, format='fixed') self._store.get_storer(path).attrs.metadata = metadata diff --git a/hawc_hal/sphere_dist.py b/hawc_hal/sphere_dist.py index db0fd24..ec4ca6c 100644 --- a/hawc_hal/sphere_dist.py +++ b/hawc_hal/sphere_dist.py @@ -28,7 +28,7 @@ def sphere_dist(ra1, dec1, ra2, dec2): # pragma: no cover dlon = lon2 - lon1 dlat = lat2 - lat1 - a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0) ** 2 + a = np.sin(dlat/2.0)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon /2.0)**2 c = 2 * np.arcsin(np.sqrt(a)) return c * rad2deg diff --git a/hawc_hal/tests/conftest.py b/hawc_hal/tests/conftest.py index 5ece37c..f3706a0 100644 --- a/hawc_hal/tests/conftest.py +++ b/hawc_hal/tests/conftest.py @@ -77,12 +77,8 @@ def check_map_trees(m1, m2): p1 = m1[p_key] p2 = m2[p_key] - assert np.allclose( - p1.observation_map.as_partial(), p2.observation_map.as_partial() - ) - assert np.allclose( - p1.background_map.as_partial(), p2.background_map.as_partial() - ) + assert np.allclose(p1.observation_map.as_partial(), p2.observation_map.as_partial()) + assert np.allclose(p1.background_map.as_partial(), p2.background_map.as_partial()) assert p1.nside == p2.nside assert p1.n_transits == p2.n_transits @@ -132,9 +128,7 @@ def check_responses(r1, r2): rb1.sim_differential_photon_fluxes, rb2.sim_differential_photon_fluxes ) - assert np.allclose( - rb1.sim_signal_events_per_bin, rb2.sim_signal_events_per_bin - ) + assert np.allclose(rb1.sim_signal_events_per_bin, rb2.sim_signal_events_per_bin) # Test PSF assert np.allclose(rb1.psf.xs, rb2.psf.xs) diff --git a/hawc_hal/tests/test_bilinar_interpolation.py b/hawc_hal/tests/test_bilinar_interpolation.py index d7e11e1..e795588 100644 --- a/hawc_hal/tests/test_bilinar_interpolation.py +++ b/hawc_hal/tests/test_bilinar_interpolation.py @@ -15,15 +15,11 @@ def test_fast_bilinear_interpolation(): new_y = np.random.uniform(min(gridy), max(gridy), 500) new_coords = np.asarray(cartesian([new_x, new_y])) - mfi = fast_bilinar_interpolation.FastBilinearInterpolation( - data.shape, (new_coords[:, 0], new_coords[:, 1]) - ) + mfi = fast_bilinar_interpolation.FastBilinearInterpolation(data.shape, (new_coords[:, 0], new_coords[:, 1])) v1 = mfi(data) # Check against the slower scipy interpolator in map_coordinates - v2 = scipy.ndimage.map_coordinates( - data, np.array((new_coords[:, 0], new_coords[:, 1])), order=1 - ) + v2 = scipy.ndimage.map_coordinates(data, np.array((new_coords[:, 0], new_coords[:, 1])), order=1) assert np.allclose(v1, v2) diff --git a/hawc_hal/tests/test_complete_analysis.py b/hawc_hal/tests/test_complete_analysis.py index d775330..3c66558 100644 --- a/hawc_hal/tests/test_complete_analysis.py +++ b/hawc_hal/tests/test_complete_analysis.py @@ -6,12 +6,15 @@ from conftest import point_source_model -@pytest.fixture(scope="module") +@pytest.fixture(scope='module') def test_fit(roi, maptree, response, point_source_model): pts_model 
= point_source_model - hawc = HAL("HAWC", maptree, response, roi) + hawc = HAL("HAWC", + maptree, + response, + roi) # Use from bin 1 to bin 9 hawc.set_active_measurements(1, 9) @@ -34,7 +37,7 @@ def test_fit(roi, maptree, response, point_source_model): def test_simulation(test_fit): jl, hawc, pts_model, param_df, like_df, data = test_fit - + sim = hawc.get_simulated_dataset("HAWCsim") sim.write("sim_resp.hd5", "sim_maptree.hd5") @@ -78,28 +81,21 @@ def test_goodness(test_fit): gf = GoodnessOfFit(jl) gof, param, likes = gf.by_mc(10) - print( - "Prob. of obtaining -log(like) >= observed by chance if null hypothesis is true: %.2f" - % gof["HAWC"] - ) + print("Prob. of obtaining -log(like) >= observed by chance if null hypothesis is true: %.2f" % gof['HAWC']) # it is a good idea to inspect the results of the simulations with some plots # Histogram of likelihood values fig, sub = plt.subplots() likes.hist(ax=sub) # Overplot a vertical dashed line on the observed value - plt.axvline( - jl.results.get_statistic_frame().loc["HAWC", "-log(likelihood)"], - color="black", - linestyle="--", - ) + plt.axvline(jl.results.get_statistic_frame().loc['HAWC', '-log(likelihood)'], + color='black', + linestyle='--') fig.savefig("hal_sim_all_likes.png") # Plot the value of beta for all simulations (for example) fig, sub = plt.subplots() - param.loc[ - (slice(None), ["pts.spectrum.main.Cutoff_powerlaw.index"]), "value" - ].plot() + param.loc[(slice(None), ['pts.spectrum.main.Cutoff_powerlaw.index']), 'value'].plot() fig.savefig("hal_sim_all_index.png") @@ -107,7 +103,7 @@ def test_fit_with_free_position(test_fit): jl, hawc, pts_model, param_df, like_df, data = test_fit - hawc.psf_integration_method = "fast" + hawc.psf_integration_method = 'fast' # Free the position of the source pts_model.pts.position.ra.free = True @@ -130,26 +126,17 @@ def test_fit_with_free_position(test_fit): # pts.position.dec(2.214 + / - 0.00025) x # 10 - a, b, cc, fig = jl.get_contours( - pts_model.pts.position.dec, - 22.13, - 22.1525, - 5, - pts_model.pts.position.ra, - 83.615, - 83.635, - 5, - ) - - plt.plot([ra], [dec], "x") + a, b, cc, fig = jl.get_contours(pts_model.pts.position.dec, 22.13, 22.1525, 5, + pts_model.pts.position.ra, 83.615, 83.635, 5) + + plt.plot([ra], [dec], 'x') fig.savefig("hal_src_localization.png") - hawc.psf_integration_method = "exact" + hawc.psf_integration_method = 'exact' pts_model.pts.position.ra.free = False pts_model.pts.position.dec.free = False - def test_bayesian_analysis(test_fit): jl, hawc, pts_model, param_df, like_df, data = test_fit @@ -181,3 +168,4 @@ def test_bayesian_analysis(test_fit): fig = bs.results.corner_plot() fig.savefig("hal_corner_plot.png") + diff --git a/hawc_hal/tests/test_copy.py b/hawc_hal/tests/test_copy.py index fe98933..c42cb16 100644 --- a/hawc_hal/tests/test_copy.py +++ b/hawc_hal/tests/test_copy.py @@ -7,15 +7,14 @@ from conftest import maptree, response - def deepcopy_hal(theMaptree, theResponse, extended=False): src_ra, src_dec = 82.628, 22.640 - src_name = "test_source" + src_name = 'test_source' - roi = HealpixConeROI(data_radius=5.0, model_radius=8.0, ra=src_ra, dec=src_dec) + roi = HealpixConeROI(data_radius=5., model_radius=8., ra=src_ra, dec=src_dec) - hawc = HAL("HAWC", theMaptree, theResponse, roi) + hawc = HAL('HAWC', theMaptree, theResponse, roi) hawc.set_active_measurements(1, 9) data = DataList(hawc) @@ -33,11 +32,9 @@ def deepcopy_hal(theMaptree, theResponse, extended=False): hawc_copy = copy.deepcopy(hawc) - def test_deepcopy_point_source(maptree, 
response): deepcopy_hal(maptree, response, extended=False) - -# @pytest.mark.xfail +#@pytest.mark.xfail def test_deepcopy_extended_source(maptree, response): deepcopy_hal(maptree, response, extended=True) diff --git a/hawc_hal/tests/test_geminga_paper.py b/hawc_hal/tests/test_geminga_paper.py index 14feb13..ca6d44b 100644 --- a/hawc_hal/tests/test_geminga_paper.py +++ b/hawc_hal/tests/test_geminga_paper.py @@ -4,7 +4,6 @@ try: import ROOT - ROOT.PyConfig.IgnoreCommandLineOptions = True except: pass @@ -14,26 +13,11 @@ from collections import namedtuple import numpy as np - def test_geminga_paper(geminga_maptree, geminga_response): - Args_fake = namedtuple( - "args", "mtfile,rsfile,startBin,stopBin,RA,Dec,uratio,delta,ROI,output,plugin" - ) - - args = Args_fake( - geminga_maptree, - geminga_response, - 1, - 9, - 98.5, - 17.76, - 1.12, - 0.3333, - 0, - "output.txt", - "new", - ) + Args_fake = namedtuple('args', 'mtfile,rsfile,startBin,stopBin,RA,Dec,uratio,delta,ROI,output,plugin') + + args = Args_fake(geminga_maptree, geminga_response, 1, 9, 98.5, 17.76, 1.12, 0.3333, 0, 'output.txt', 'new') param, like_df, TS = go(args) @@ -43,11 +27,9 @@ def test_geminga_paper(geminga_maptree, geminga_response): # B0656.spectrum.main.Powerlaw.K (6.0 -1.3 +1.7) x 10^-24 1 / (cm2 keV s) # B0656.spectrum.main.Powerlaw.index -2.140 +/- 0.17 - assert np.allclose( - param.loc[:, "value"].values, - [5.38278446e00, 1.40122099e-23, -2.34261469e00, 5.98438658e-24, -2.13846297e00], - rtol=5e-2, - ) + assert np.allclose(param.loc[:, 'value'].values, + [5.38278446e+00, 1.40122099e-23, -2.34261469e+00, 5.98438658e-24, -2.13846297e+00], + rtol=5e-2) def go(args): @@ -55,9 +37,11 @@ def go(args): spectrum = Powerlaw() shape = Continuous_injection_diffusion_legacy() - source = ExtendedSource("Geminga", spatial_shape=shape, spectral_shape=spectrum) + source = ExtendedSource("Geminga", + spatial_shape=shape, + spectral_shape=spectrum) - fluxUnit = 1.0 / (u.TeV * u.cm**2 * u.s) + fluxUnit = 1. / (u.TeV * u.cm ** 2 * u.s) # Set spectral parameters spectrum.K = 1e-14 * fluxUnit @@ -67,7 +51,7 @@ def go(args): spectrum.piv.fix = True spectrum.index = -2.4 - spectrum.index.bounds = (-4.0, -1.0) + spectrum.index.bounds = (-4., -1.) # Set spatial parameters shape.lon0 = args.RA * u.degree @@ -78,7 +62,7 @@ def go(args): shape.rdiff0 = 6.0 * u.degree shape.rdiff0.fix = False - shape.rdiff0.max_value = 12.0 + shape.rdiff0.max_value = 12. shape.delta.min_value = 0.1 shape.delta = args.delta @@ -96,9 +80,11 @@ def go(args): spectrum2 = Powerlaw() shape2 = Continuous_injection_diffusion_legacy() - source2 = ExtendedSource("B0656", spatial_shape=shape2, spectral_shape=spectrum2) + source2 = ExtendedSource("B0656", + spatial_shape=shape2, + spectral_shape=spectrum2) - fluxUnit = 1.0 / (u.TeV * u.cm**2 * u.s) + fluxUnit = 1. / (u.TeV * u.cm ** 2 * u.s) # Set spectral parameters for the 2nd source spectrum2.K = 1e-14 * fluxUnit @@ -109,7 +95,7 @@ def go(args): spectrum2.index = -2.2 # spectrum2.index.fix = True - spectrum2.index.bounds = (-4.0, -1.0) + spectrum2.index.bounds = (-4., -1.) # Set spatial parameters for the 2nd source shape2.lon0 = 104.95 * u.degree @@ -120,7 +106,7 @@ def go(args): shape2.rdiff0 = 6.0 * u.degree shape2.rdiff0.fix = False - shape2.rdiff0.max_value = 12.0 + shape2.rdiff0.max_value = 12. shape2.delta.min_value = 0.2 shape2.delta = args.delta @@ -146,7 +132,7 @@ def go(args): ra_c, dec_c, rad = (None, None, None) if args.ROI == 0: - ra_c, dec_c, rad = 101.7, 16, 9.0 + ra_c, dec_c, rad = 101.7, 16, 9. 
# llh.set_ROI(101.7, 16, 9., True) elif args.ROI == 1: ra_c, dec_c, rad = 98.5, 17.76, 4.5 @@ -155,13 +141,13 @@ def go(args): ra_c, dec_c, rad = 97, 18.5, 6 # llh.set_ROI(97, 18.5, 6, True) elif args.ROI == 3: - ra_c, dec_c, rad = 104.95, 14.24, 3.0 + ra_c, dec_c, rad = 104.95, 14.24, 3. # llh.set_ROI(104.95, 14.24, 3., True) elif args.ROI == 4: ra_c, dec_c, rad = 107, 13, 5.4 # llh.set_ROI(107, 13, 5.4, True) - if args.plugin == "old": + if args.plugin == 'old': llh = HAWCLike("Geminga", args.mtfile, args.rsfile, fullsky=True) llh.set_active_measurements(args.startBin, args.stopBin) @@ -169,20 +155,25 @@ def go(args): else: - roi = HealpixConeROI( - data_radius=rad, model_radius=rad + 10.0, ra=ra_c, dec=dec_c - ) - - llh = HAL("HAWC", args.mtfile, args.rsfile, roi) - + roi = HealpixConeROI(data_radius=rad, + model_radius=rad + 10.0, + ra=ra_c, dec=dec_c) + + llh = HAL("HAWC", + args.mtfile, + args.rsfile, + roi) + llh.set_active_measurements(args.startBin, args.stopBin) print(lm) # we fit a common diffusion coefficient so parameters are linked if "B0656" in lm and "Geminga" in lm: - law = Line(b=250.0 / 288.0, a=0.0) - lm.link(lm.B0656.spatial_shape.rdiff0, lm.Geminga.spatial_shape.rdiff0, law) + law = Line(b=250. / 288., a=0.) + lm.link(lm.B0656.spatial_shape.rdiff0, + lm.Geminga.spatial_shape.rdiff0, + law) lm.B0656.spatial_shape.rdiff0.Line.a.fix = True lm.B0656.spatial_shape.rdiff0.Line.b.fix = True @@ -191,10 +182,10 @@ def go(args): print(lm) # Set up the likelihood and run the fit - TS = 0.0 + TS = 0. try: - + lm.Geminga.spatial_shape.rdiff0 = 5.5 lm.Geminga.spatial_shape.rdiff0.fix = False lm.Geminga.spectrum.main.Powerlaw.K = 1.36e-23 @@ -227,23 +218,23 @@ def go(args): # Print the TS, significance, and fit parameters, and then plot stuff print("\nTest statistic:") - if args.plugin == "old": + if args.plugin == 'old': TS = llh.calc_TS() else: - + if "B0656" in lm and "Geminga" in lm: lm.unlink(lm.B0656.spatial_shape.rdiff0) - + TS_df = jl.compute_TS("Geminga", like_df) - TS = TS_df.loc["HAWC", "TS"] - + TS = TS_df.loc['HAWC', 'TS'] + TS_df2 = jl.compute_TS("B0656", like_df) - TS2 = TS_df2.loc["HAWC", "TS"] - - print(TS_df) - print(TS_df2) + TS2 = TS_df2.loc['HAWC', 'TS'] + + print (TS_df) + print (TS_df2) print("Test statistic: %g" % TS) @@ -273,73 +264,27 @@ def go(args): if __name__ == "__main__": p = argparse.ArgumentParser(description="Example spectral fit with LiFF") - p.add_argument( - "-m", - "--maptreefile", - dest="mtfile", - help="LiFF MapTree ROOT file", - default="./maptree.root", - ) - p.add_argument( - "-r", - "--responsefile", - dest="rsfile", - help="LiFF detector response ROOT file", - default="./response.root", - ) - p.add_argument( - "--startBin", - dest="startBin", - default=1, - type=int, - help="Starting analysis bin [0..9]", - ) - p.add_argument( - "--stopBin", - dest="stopBin", - default=9, - type=int, - help="Stopping analysis bin [0..9]", - ) - p.add_argument( - "--RA", default=98.5, type=float, help="Source RA in degrees (Geminga default)" - ) - p.add_argument( - "--Dec", - default=17.76, - type=float, - help="Source Dec in degrees (Geminga default)", - ) - p.add_argument( - "--uratio", - dest="uratio", - default=1.12, - type=float, - help="the ratio of energy density between CMB and B. 
1.12 means B=3uG and CMB=0.25", - ) - p.add_argument( - "--delta", - dest="delta", - default=0.3333, - type=float, - help="Diffusion spectral index (0.3 to 0.6)", - ) + p.add_argument("-m", "--maptreefile", dest="mtfile", + help="LiFF MapTree ROOT file", default="./maptree.root") + p.add_argument("-r", "--responsefile", dest="rsfile", + help="LiFF detector response ROOT file", default="./response.root") + p.add_argument("--startBin", dest="startBin", default=1, type=int, + help="Starting analysis bin [0..9]") + p.add_argument("--stopBin", dest="stopBin", default=9, type=int, + help="Stopping analysis bin [0..9]") + p.add_argument("--RA", default=98.5, type=float, + help="Source RA in degrees (Geminga default)") + p.add_argument("--Dec", default=17.76, type=float, + help="Source Dec in degrees (Geminga default)") + p.add_argument("--uratio", dest="uratio", default=1.12, type=float, + help="the ratio of energy density between CMB and B. 1.12 means B=3uG and CMB=0.25") + p.add_argument("--delta", dest="delta", default=0.3333, type=float, + help="Diffusion spectral index (0.3 to 0.6)") p.add_argument("--ROI", dest="ROI", default=0, type=int) - p.add_argument( - "-o", - "--output", - dest="output", - default="output.txt", - help="Parameter output file.", - ) - p.add_argument( - "--plugin", - dest="plugin", - default="old", - type=str, - help="Old or new", - choices=["new", "old"], - ) + p.add_argument("-o", "--output", dest="output", default="output.txt", + help="Parameter output file.") + p.add_argument("--plugin", dest='plugin', default='old', type=str, + help="Old or new", choices=['new', 'old']) args = p.parse_args() diff --git a/hawc_hal/tests/test_healpixRoi.py b/hawc_hal/tests/test_healpixRoi.py index c8682a5..4cda5da 100644 --- a/hawc_hal/tests/test_healpixRoi.py +++ b/hawc_hal/tests/test_healpixRoi.py @@ -12,91 +12,86 @@ from conftest import check_map_trees -def Sky2Vec(ra, dec): - c = SkyCoord(frame="icrs", ra=ra * u.degree, dec=dec * u.degree) - theta = (90.0 * u.degree - c.dec).to(u.radian).value - phi = c.ra.to(u.radian).value - vec = hp.pixelfunc.ang2vec(theta, phi) - return vec - - -NSIDE = 512 - - -def test_healpixRoi(geminga_maptree, geminga_response): - - # test to make sure writing a model with HealpixMapROI works fine - ra, dec = 101.7, 16.0 - data_radius = 9.0 - model_radius = 24.0 - - m = np.zeros(hp.nside2npix(NSIDE)) - vec = Sky2Vec(ra, dec) - m[ - hp.query_disc( - NSIDE, vec, (data_radius * u.degree).to(u.radian).value, inclusive=False - ) - ] = 1 - - # hp.fitsfunc.write_map("roitemp.fits" , m, nest=False, coord="C", partial=False, overwrite=True ) - - map_roi = HealpixMapROI( - data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roimap=m - ) - # fits_roi = HealpixMapROI(data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roifile="roitemp.fits") - hawc = HAL("HAWC", geminga_maptree, geminga_response, map_roi) - hawc.set_active_measurements(1, 9) - - """ +def Sky2Vec( ra, dec ): + c = SkyCoord( frame = "icrs", ra=ra*u.degree, dec=dec*u.degree ) + theta = (90.0*u.degree-c.dec).to(u.radian).value + phi = c.ra.to(u.radian).value + vec = hp.pixelfunc.ang2vec(theta, phi) + return vec + +NSIDE=512 + +def test_healpixRoi(geminga_maptree,geminga_response): + + #test to make sure writing a model with HealpixMapROI works fine + ra, dec = 101.7, 16. + data_radius = 9. + model_radius = 24. 
+
+    m = np.zeros(hp.nside2npix(NSIDE))
+    vec = Sky2Vec(ra, dec)
+    m[hp.query_disc(NSIDE, vec, (data_radius*u.degree).to(u.radian).value, inclusive=False)] = 1
+
+    #hp.fitsfunc.write_map("roitemp.fits" , m, nest=False, coord="C", partial=False, overwrite=True )
+
+    map_roi = HealpixMapROI(data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roimap=m)
+    #fits_roi = HealpixMapROI(data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roifile="roitemp.fits")
+    hawc = HAL("HAWC",geminga_maptree,geminga_response,map_roi)
+    hawc.set_active_measurements(1,9)
+
+    '''
     Define model:
     Two sources, 1 point, 1 extended
     Same declination, but offset in RA
     Different spectral index, but both power laws
-    """
-    pt_shift = 3.0
-    ext_shift = 2.0
-
-    # First source
-    spectrum1 = Powerlaw()
-    source1 = PointSource("point", ra=ra + pt_shift, dec=dec, spectral_shape=spectrum1)
-
-    spectrum1.K = 1e-12 / (u.TeV * u.cm**2 * u.s)
-    spectrum1.piv = 1 * u.TeV
-    spectrum1.index = -2.3
-
-    spectrum1.piv.fix = True
-    spectrum1.K.fix = True
-    spectrum1.index.fix = True
-
-    # Second source
-    shape = Gaussian_on_sphere(lon0=ra - ext_shift, lat0=dec, sigma=0.3)
-    spectrum2 = Powerlaw()
-    source2 = ExtendedSource("extended", spatial_shape=shape, spectral_shape=spectrum2)
-
-    spectrum2.K = 1e-12 / (u.TeV * u.cm**2 * u.s)
-    spectrum2.piv = 1 * u.TeV
-    spectrum2.index = -2.0
-
-    spectrum2.piv.fix = True
-    spectrum2.K.fix = True
-    spectrum2.index.fix = True
-
-    shape.lon0.fix = True
-    shape.lat0.fix = True
-    shape.sigma.fix = True
-
-    model = Model(source1, source2)
-
-    hawc.set_model(model)
-
-    # Write the model map
-    model_map_tree = hawc.write_model_map("test.hd5", test_return_map=True)
-
-    # Read the model back
-    hawc_model = map_tree_factory("test.hd5", map_roi)
-
-    # Check written model and read model are the same
-    check_map_trees(hawc_model, model_map_tree)
-
-    os.remove("test.hd5")
+    '''
+    pt_shift=3.0
+    ext_shift=2.0
+
+    # First source
+    spectrum1 = Powerlaw()
+    source1 = PointSource("point", ra=ra+pt_shift,dec=dec,spectral_shape=spectrum1)
+
+    spectrum1.K = 1e-12 / (u.TeV * u.cm **2 * u.s)
+    spectrum1.piv = 1* u.TeV
+    spectrum1.index = -2.3
+
+    spectrum1.piv.fix = True
+    spectrum1.K.fix = True
+    spectrum1.index.fix = True
+
+    # Second source
+    shape = Gaussian_on_sphere(lon0=ra - ext_shift,lat0=dec,sigma=0.3)
+    spectrum2 = Powerlaw()
+    source2 = ExtendedSource("extended",spatial_shape=shape,spectral_shape=spectrum2)
+
+    spectrum2.K = 1e-12 / (u.TeV * u.cm **2 * u.s)
+    spectrum2.piv = 1* u.TeV
+    spectrum2.index = -2.0
+
+    spectrum2.piv.fix = True
+    spectrum2.K.fix = True
+    spectrum2.index.fix = True
+
+    shape.lon0.fix = True
+    shape.lat0.fix = True
+    shape.sigma.fix = True
+
+    model = Model(source1,source2)
+
+    hawc.set_model(model)
+
+    # Write the model map
+    model_map_tree=hawc.write_model_map("test.hd5",test_return_map=True)
+
+    # Read the model back
+    hawc_model = map_tree_factory('test.hd5',map_roi)
+
+    # Check written model and read model are the same
+    check_map_trees(hawc_model,model_map_tree)
+
+
+    os.remove( "test.hd5" )
+
+
diff --git a/hawc_hal/tests/test_maptree.py b/hawc_hal/tests/test_maptree.py
index 4e7ccb3..4e60f04 100644
--- a/hawc_hal/tests/test_maptree.py
+++ b/hawc_hal/tests/test_maptree.py
@@ -5,7 +5,8 @@
 from conftest import check_map_trees


-def test_constructor(maptree, roi):
+def test_constructor(maptree,
+                     roi):

     roi_ = roi

@@ -25,8 +26,8 @@ def test_constructor(maptree, roi):

     # Check corner cases

     # This should issue a warning because we saved the maptree with a ROI 
and we try to use # without one - - # with pytest.warns(UserWarning): + + #with pytest.warns(UserWarning): _ = map_tree_factory(test_filename, None) # Now try to load with a different ROI than the one used for the file @@ -38,12 +39,10 @@ def test_constructor(maptree, roi): _ = map_tree_factory(test_filename, oroi) # This instead should work because the ROI is different, but contained in the ROI of the file - smaller_roi = HealpixConeROI( - data_radius=roi.data_radius / 2.0, - model_radius=roi.model_radius, - ra=ra_c - 0.2, - dec=dec_c + 0.15, - ) + smaller_roi = HealpixConeROI(data_radius=roi.data_radius / 2.0, + model_radius=roi.model_radius, + ra=ra_c - 0.2, + dec=dec_c + 0.15) _ = map_tree_factory(test_filename, smaller_roi) diff --git a/hawc_hal/tests/test_model_residual_maps.py b/hawc_hal/tests/test_model_residual_maps.py index 8d5067f..b0df547 100644 --- a/hawc_hal/tests/test_model_residual_maps.py +++ b/hawc_hal/tests/test_model_residual_maps.py @@ -21,16 +21,15 @@ not has_root, reason="No ROOT available" ) - @skip_if_ROOT_is_not_available def test_model_residual_maps(geminga_maptree, geminga_response, geminga_roi): - # data_radius = 5.0 - # model_radius = 7.0 + #data_radius = 5.0 + #model_radius = 7.0 output = dirname(geminga_maptree) ra_src, dec_src = 101.7, 16.0 - maptree, response, roi = geminga_maptree, geminga_response, geminga_roi + maptree, response, roi = geminga_maptree, geminga_response, geminga_roi hawc = HAL("HAWC", maptree, response, roi) @@ -40,24 +39,22 @@ def test_model_residual_maps(geminga_maptree, geminga_response, geminga_roi): # Display information about the data loaded and the ROI hawc.display() - """ + ''' Define model: Two sources, 1 point, 1 extended Same declination, but offset in RA Different spectral index, but both power laws - """ - pt_shift = 3.0 + ''' + pt_shift=3.0 ext_shift = 2.0 # First source spectrum1 = Powerlaw() - source1 = PointSource( - "point", ra=ra_src + pt_shift, dec=dec_src, spectral_shape=spectrum1 - ) + source1 = PointSource("point", ra=ra_src + pt_shift, dec=dec_src, spectral_shape=spectrum1) - spectrum1.K = 1e-12 / (u.TeV * u.cm**2 * u.s) + spectrum1.K = 1e-12 / (u.TeV * u.cm ** 2 * u.s) spectrum1.piv = 1 * u.TeV spectrum1.index = -2.3 @@ -70,13 +67,13 @@ def test_model_residual_maps(geminga_maptree, geminga_response, geminga_roi): spectrum2 = Powerlaw() source2 = ExtendedSource("extended", spatial_shape=shape, spectral_shape=spectrum2) - spectrum2.K = 1e-12 / (u.TeV * u.cm**2 * u.s) + spectrum2.K = 1e-12 / (u.TeV * u.cm ** 2 * u.s) spectrum2.piv = 1 * u.TeV - spectrum2.index = -2.0 + spectrum2.index = -2.0 - shape.lon0.fix = True - shape.lat0.fix = True - shape.sigma.fix = True + shape.lon0.fix=True + shape.lat0.fix=True + shape.sigma.fix=True spectrum2.piv.fix = True spectrum2.K.fix = True @@ -91,7 +88,7 @@ def test_model_residual_maps(geminga_maptree, geminga_response, geminga_roi): # Define the JointLikelihood object (glue the data to the model) jl = JointLikelihood(model, data, verbose=False) - # This has the effect of loading the model cache + # This has the effect of loading the model cache fig = hawc.display_spectrum() # the test file names @@ -99,16 +96,13 @@ def test_model_residual_maps(geminga_maptree, geminga_response, geminga_roi): residual_file_name = "{0}/test_residual.hdf5".format(output) # Write the map trees for testing - model_map_tree = hawc.write_model_map( - model_file_name, poisson_fluctuate=True, test_return_map=True - ) - residual_map_tree = hawc.write_residual_map( - residual_file_name, 
test_return_map=True - ) + model_map_tree = hawc.write_model_map(model_file_name, poisson_fluctuate=True, test_return_map=True) + residual_map_tree = hawc.write_residual_map(residual_file_name, test_return_map=True) # Read the maps back in - hawc_model = map_tree_factory(model_file_name, roi) - hawc_residual = map_tree_factory(residual_file_name, roi) + hawc_model = map_tree_factory(model_file_name,roi) + hawc_residual = map_tree_factory(residual_file_name,roi) + check_map_trees(hawc_model, model_map_tree) check_map_trees(hawc_residual, residual_map_tree) diff --git a/hawc_hal/tests/test_n_transits.py b/hawc_hal/tests/test_n_transits.py index ff82806..7a26400 100644 --- a/hawc_hal/tests/test_n_transits.py +++ b/hawc_hal/tests/test_n_transits.py @@ -19,7 +19,7 @@ def test_transits(maptree, roi): # Case 1: specify number of transits n_transits = 777.7 - maptree_ntransits = map_tree_factory(maptree, roi_, n_transits) + maptree_ntransits = map_tree_factory(maptree,roi_,n_transits) # does the maptree return the specified transits? check_n_transits(maptree_ntransits, n_transits) @@ -29,3 +29,5 @@ def test_transits(maptree, roi): n_transits = maptree_unspecifed.n_transits check_n_transits(maptree_unspecifed, n_transits) + + \ No newline at end of file diff --git a/hawc_hal/tests/test_on_point_source.py b/hawc_hal/tests/test_on_point_source.py index b3b08ee..7cff27d 100644 --- a/hawc_hal/tests/test_on_point_source.py +++ b/hawc_hal/tests/test_on_point_source.py @@ -3,9 +3,17 @@ import argparse -def test_on_point_source(roi, maptree, response, point_source_model, liff=False): +def test_on_point_source(roi, + maptree, + response, + point_source_model, + liff=False): - fit_point_source(roi, maptree, response, point_source_model, liff=liff) + fit_point_source(roi, + maptree, + response, + point_source_model, + liff=liff) if __name__ == "__main__": @@ -22,40 +30,23 @@ def test_on_point_source(roi, maptree, response, point_source_model, liff=False) def_ra_roi, def_dec_roi = def_roi.ra_dec_center parser = argparse.ArgumentParser() - parser.add_argument( - "--liff", help="Use LIFF instead of HAL (for benchmarking)", action="store_true" - ) + parser.add_argument("--liff", help="Use LIFF instead of HAL (for benchmarking)", action="store_true") parser.add_argument("--ra", help="RA of source", type=float, default=def_ra) parser.add_argument("--dec", help="Dec of source", type=float, default=def_dec) - parser.add_argument( - "--ra_roi", help="RA of center of ROI", type=float, default=def_ra_roi - ) - parser.add_argument( - "--dec_roi", help="Dec of center of ROI", type=float, default=def_dec_roi - ) - parser.add_argument( - "--data_radius", help="Radius of the data ROI", type=float, default=def_drad - ) - parser.add_argument( - "--model_radius", help="Radius of the model ROI", type=float, default=def_mrad - ) + parser.add_argument("--ra_roi", help="RA of center of ROI", type=float, default=def_ra_roi) + parser.add_argument("--dec_roi", help="Dec of center of ROI", type=float, default=def_dec_roi) + parser.add_argument("--data_radius", help="Radius of the data ROI", type=float, default=def_drad) + parser.add_argument("--model_radius", help="Radius of the model ROI", type=float, default=def_mrad) parser.add_argument("--maptree", help="Maptree", type=str, default=maptree()) parser.add_argument("--response", help="Response", type=str, default=response()) - parser.add_argument( - "--free_position", - help="Use this to set the position free", - action="store_true", - default=False, - ) + 
parser.add_argument("--free_position", help='Use this to set the position free', action='store_true', default=False) args = parser.parse_args() - roi = HealpixConeROI( - data_radius=args.data_radius, - model_radius=args.model_radius, - ra=args.ra_roi, - dec=args.dec_roi, - ) + roi = HealpixConeROI(data_radius=args.data_radius, + model_radius=args.model_radius, + ra=args.ra_roi, + dec=args.dec_roi) pts_model = point_source_model(ra=args.ra, dec=args.dec) @@ -64,10 +55,8 @@ def test_on_point_source(roi, maptree, response, point_source_model, liff=False) pts_model.pts.position.ra.free = True pts_model.pts.position.dec.free = True - test_on_point_source( - roi, - liff=args.liff, - point_source_model=pts_model, - maptree=args.maptree, - response=args.response, - ) + test_on_point_source(roi, + liff=args.liff, + point_source_model=pts_model, + maptree=args.maptree, + response=args.response) diff --git a/hawc_hal/tests/test_psf.py b/hawc_hal/tests/test_psf.py index 833340e..47c63bc 100644 --- a/hawc_hal/tests/test_psf.py +++ b/hawc_hal/tests/test_psf.py @@ -3,7 +3,6 @@ import pytest from hawc_hal.psf_fast import InvalidPSF, InvalidPSFError - def test_invalid_psf(): psf = InvalidPSF() diff --git a/hawc_hal/tests/test_roi.py b/hawc_hal/tests/test_roi.py index d0ed25f..55e918b 100644 --- a/hawc_hal/tests/test_roi.py +++ b/hawc_hal/tests/test_roi.py @@ -9,65 +9,53 @@ import os -def Sky2Vec(ra, dec): - c = SkyCoord(frame="icrs", ra=ra * u.degree, dec=dec * u.degree) - theta = (90.0 * u.degree - c.dec).to(u.radian).value - phi = c.ra.to(u.radian).value - vec = hp.pixelfunc.ang2vec(theta, phi) - return vec - - -NSIDE = 512 +def Sky2Vec( ra, dec ): + c = SkyCoord( frame = "icrs", ra=ra*u.degree, dec=dec*u.degree ) + theta = (90.0*u.degree-c.dec).to(u.radian).value + phi = c.ra.to(u.radian).value + vec = hp.pixelfunc.ang2vec(theta, phi) + return vec +NSIDE=512 def test_rois(): - # test to make sure ConeROI and MapROI with a simple, circular active region return the same active pixels. - - ra, dec = 100, 30 - data_radius = 10 - model_radius = 15 - - cone_roi = HealpixConeROI( - data_radius=data_radius, model_radius=model_radius, ra=ra, dec=dec - ) - - m = np.zeros(hp.nside2npix(NSIDE)) - vec = Sky2Vec(ra, dec) - m[ - hp.query_disc( - NSIDE, vec, (data_radius * u.degree).to(u.radian).value, inclusive=False - ) - ] = 1 - - hp.fitsfunc.write_map( - "roitemp.fits", m, nest=False, coord="C", partial=False, overwrite=True - ) - - map_roi = HealpixMapROI( - data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roimap=m - ) - fits_roi = HealpixMapROI( - data_radius=data_radius, - ra=ra, - dec=dec, - model_radius=model_radius, - roifile="roitemp.fits", - ) - - assert np.all(cone_roi.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) - assert np.all(fits_roi.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) - - os.remove("roitemp.fits") - # test that all is still good after saving the ROIs to dictionaries and restoring. - cone_dict = cone_roi.to_dict() - map_dict = map_roi.to_dict() - fits_dict = fits_roi.to_dict() - - cone_roi2 = get_roi_from_dict(cone_dict) - map_roi2 = get_roi_from_dict(map_dict) - fits_roi2 = get_roi_from_dict(fits_dict) - - assert np.all(cone_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) - assert np.all(fits_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) - assert np.all(map_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) + #test to make sure ConeROI and MapROI with a simple, circular active region return the same active pixels. 
+ + ra, dec = 100, 30 + data_radius = 10 + model_radius = 15 + + cone_roi = HealpixConeROI(data_radius=data_radius, + model_radius=model_radius, + ra=ra, + dec=dec) + + m = np.zeros(hp.nside2npix(NSIDE)) + vec = Sky2Vec(ra, dec) + m[hp.query_disc(NSIDE, vec, (data_radius*u.degree).to(u.radian).value, inclusive=False)] = 1 + + hp.fitsfunc.write_map("roitemp.fits" , m, nest=False, coord="C", partial=False, overwrite=True ) + + + map_roi = HealpixMapROI(data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roimap=m) + fits_roi = HealpixMapROI(data_radius=data_radius, ra=ra, dec=dec, model_radius=model_radius, roifile="roitemp.fits") + + assert np.all( cone_roi.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) + assert np.all( fits_roi.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) + + os.remove( "roitemp.fits" ) + #test that all is still good after saving the ROIs to dictionaries and restoring. + cone_dict = cone_roi.to_dict() + map_dict = map_roi.to_dict() + fits_dict = fits_roi.to_dict() + + cone_roi2 = get_roi_from_dict( cone_dict ) + map_roi2 = get_roi_from_dict( map_dict ) + fits_roi2 = get_roi_from_dict( fits_dict ) + + assert np.all( cone_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) + assert np.all( fits_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) + assert np.all( map_roi2.active_pixels(NSIDE) == map_roi.active_pixels(NSIDE)) + + diff --git a/hawc_hal/tests/test_root_to_hdf.py b/hawc_hal/tests/test_root_to_hdf.py index d3d68f9..5520e7a 100644 --- a/hawc_hal/tests/test_root_to_hdf.py +++ b/hawc_hal/tests/test_root_to_hdf.py @@ -15,7 +15,6 @@ not has_root, reason="No ROOT available" ) - @skip_if_ROOT_is_not_available def test_root_to_hdf_response(response): @@ -36,8 +35,9 @@ def test_root_to_hdf_response(response): os.remove(test_filename) - -def do_one_test_maptree(geminga_roi, geminga_maptree, fullsky=False): +def do_one_test_maptree(geminga_roi, + geminga_maptree, + fullsky=False): # Test both with a defined ROI and full sky (ROI is None) if fullsky: @@ -64,12 +64,16 @@ def do_one_test_maptree(geminga_roi, geminga_maptree, fullsky=False): os.remove(test_filename) - @skip_if_ROOT_is_not_available -def test_root_to_hdf_maptree_roi(geminga_roi, geminga_maptree): - do_one_test_maptree(geminga_roi, geminga_maptree, fullsky=False) - +def test_root_to_hdf_maptree_roi(geminga_roi, + geminga_maptree): + do_one_test_maptree(geminga_roi, + geminga_maptree, + fullsky=False) @skip_if_ROOT_is_not_available -def test_root_to_hdf_maptree_full_sky(geminga_roi, geminga_maptree): - do_one_test_maptree(geminga_roi, geminga_maptree, fullsky=True) +def test_root_to_hdf_maptree_full_sky(geminga_roi, + geminga_maptree): + do_one_test_maptree(geminga_roi, + geminga_maptree, + fullsky=True) diff --git a/hawc_hal/tests/test_serialization.py b/hawc_hal/tests/test_serialization.py index 38f04f7..fe3d47c 100644 --- a/hawc_hal/tests/test_serialization.py +++ b/hawc_hal/tests/test_serialization.py @@ -8,28 +8,26 @@ def test_serialization(): my_obj = pd.Series(list(range(100))) - my_ob2 = pd.DataFrame.from_dict( - {"one": np.random.uniform(0, 1, 10), "two": np.random.uniform(0, 1, 10)} - ) + my_ob2 = pd.DataFrame.from_dict({'one': np.random.uniform(0, 1, 10), 'two': np.random.uniform(0, 1, 10)}) - meta = {"meta1": 1.0, "meta2": 2.0} + meta = {'meta1': 1.0, 'meta2': 2.0} - test_file = "test.hd5" + test_file = 'test.hd5' - with serialize.Serialization(test_file, mode="w") as store: + with serialize.Serialization(test_file, mode='w') as store: - 
store.store_pandas_object("test_obj", my_obj, **meta) - store.store_pandas_object("test_df", my_ob2) + store.store_pandas_object('test_obj', my_obj, **meta) + store.store_pandas_object('test_df', my_ob2) assert os.path.exists(test_file) # Now retrieve the object - with serialize.Serialization(test_file, mode="r") as store: + with serialize.Serialization(test_file, mode='r') as store: - copy_obj, copy_meta = store.retrieve_pandas_object("test_obj") - copy_obj2, copy_meta2 = store.retrieve_pandas_object("test_df") + copy_obj, copy_meta = store.retrieve_pandas_object('test_obj') + copy_obj2, copy_meta2 = store.retrieve_pandas_object('test_df') - assert set(store.keys) == set(("/test_obj", "/test_df")) + assert set(store.keys) == set(('/test_obj','/test_df')) assert np.alltrue(my_obj == copy_obj) assert np.alltrue(my_ob2 == copy_obj2) @@ -38,6 +36,10 @@ def test_serialization(): assert meta[k] == copy_meta[k] - assert len(copy_meta2) == 0 + assert len(copy_meta2)==0 os.remove(test_file) + + + + diff --git a/hawc_hal/util.py b/hawc_hal/util.py index 0a04a22..16c54d2 100644 --- a/hawc_hal/util.py +++ b/hawc_hal/util.py @@ -5,7 +5,9 @@ def cartesian(arrays): - return np.dstack(np.meshgrid(*arrays, indexing="ij")).reshape(-1, len(arrays)) + return np.dstack( + np.meshgrid(*arrays, indexing='ij') + ).reshape(-1, len(arrays)) def cartesian_(arrays, out=None): @@ -25,7 +27,7 @@ def cartesian_(arrays, out=None): out = np.zeros([n, len(arrays)], dtype=dtype) m = old_div(n, arrays[0].size) - out[:, 0] = np.repeat(arrays[0], m) + out[:,0] = np.repeat(arrays[0], m) if arrays[1:]: @@ -33,7 +35,7 @@ def cartesian_(arrays, out=None): for j in range(1, arrays[0].size): - out[j * m : (j + 1) * m, 1:] = out[0:m, 1:] + out[j*m:(j+1)*m, 1:] = out[0:m, 1:] return out From c518159010ba57a9fd58445786a7c8e6f1b79f37 Mon Sep 17 00:00:00 2001 From: Ramiro Torres-Escobedo Date: Fri, 2 Jun 2023 23:43:01 +0800 Subject: [PATCH 6/9] adding scripts directory during HAL installation --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 455d1c7..7c2d981 100644 --- a/setup.py +++ b/setup.py @@ -33,7 +33,7 @@ def find_data_files(directory): "hawc_hal/region_of_interest", "hawc_hal/convenience_functions", "hawc_hal/tests", - "hawc_hal/scripts", + "scripts", ], url="https://github.com/threeML/hawc_hal", license="BSD-3.0", From 26c7464e00d7fe1123b4c0fa6539686c624fc4cc Mon Sep 17 00:00:00 2001 From: Ramiro Torres-Escobedo Date: Sat, 3 Jun 2023 00:15:11 +0800 Subject: [PATCH 7/9] Revert "adding scripts directory during HAL installation" This reverts commit c518159010ba57a9fd58445786a7c8e6f1b79f37. --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 7c2d981..455d1c7 100644 --- a/setup.py +++ b/setup.py @@ -33,7 +33,7 @@ def find_data_files(directory): "hawc_hal/region_of_interest", "hawc_hal/convenience_functions", "hawc_hal/tests", - "scripts", + "hawc_hal/scripts", ], url="https://github.com/threeML/hawc_hal", license="BSD-3.0", From 3d7df5c3888b84e13564077c2c27677eb8210b1c Mon Sep 17 00:00:00 2001 From: Ramiro Torres-Escobedo Date: Sat, 3 Jun 2023 00:15:47 +0800 Subject: [PATCH 8/9] Revert "adding scripts directory to be included during HAL installation" This reverts commit 83ca95c0945e5e7450dcb1689462b47298f5b7bf. 
--- setup.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/setup.py b/setup.py index 455d1c7..39d20de 100644 --- a/setup.py +++ b/setup.py @@ -1,16 +1,17 @@ -import os -import os.path - from setuptools import setup +import os, os.path # Create list of data files def find_data_files(directory): + paths = [] - for path, directories, filenames in os.walk(directory): + for (path, directories, filenames) in os.walk(directory): + for filename in filenames: + paths.append(os.path.join("..", path, filename)) return paths @@ -33,7 +34,6 @@ def find_data_files(directory): "hawc_hal/region_of_interest", "hawc_hal/convenience_functions", "hawc_hal/tests", - "hawc_hal/scripts", ], url="https://github.com/threeML/hawc_hal", license="BSD-3.0", From bdd423b68760b761dfd7f15b03e38c4d8f8b734a Mon Sep 17 00:00:00 2001 From: Ramiro Torres-Escobedo Date: Sun, 4 Jun 2023 12:43:14 +0800 Subject: [PATCH 9/9] fixing issue with numpy no longer having the warnings attribute as of v1.24.3 --- .../healpix_handling/gnomonic_projection.py | 77 +++++++++++-------- 1 file changed, 43 insertions(+), 34 deletions(-) diff --git a/hawc_hal/healpix_handling/gnomonic_projection.py b/hawc_hal/healpix_handling/gnomonic_projection.py index c4a0fca..e2584f6 100644 --- a/hawc_hal/healpix_handling/gnomonic_projection.py +++ b/hawc_hal/healpix_handling/gnomonic_projection.py @@ -1,6 +1,6 @@ -import numpy as np +import warnings + from healpy import projaxes as PA -import matplotlib.pyplot as plt def get_gnomonic_projection(figure, hpx_map, **kwargs): @@ -16,23 +16,23 @@ def get_gnomonic_projection(figure, hpx_map, **kwargs): :return: the array containing the projection. """ - defaults = {'coord': 'C', - 'rot': None, - 'format': '%g', - 'flip': 'astro', - 'xsize': 200, - 'ysize': None, - 'reso': 1.5, - 'nest': False, - 'min': None, - 'max': None, - 'cmap': None, - 'norm': None} + defaults = { + "coord": "C", + "rot": None, + "format": "%g", + "flip": "astro", + "xsize": 200, + "ysize": None, + "reso": 1.5, + "nest": False, + "min": None, + "max": None, + "cmap": None, + "norm": None, + } for key, default_value in list(defaults.items()): - if key not in kwargs: - kwargs[key] = default_value ## Colas, 2018-07-11: The following fails for really tall figures, @@ -47,26 +47,35 @@ def get_gnomonic_projection(figure, hpx_map, **kwargs): # extent[3] - margins[3] - margins[1]) extent = (0.05, 0.05, 0.9, 0.9) - ax = PA.HpxGnomonicAxes(figure, extent, - coord=kwargs['coord'], - rot=kwargs['rot'], - format=kwargs['format'], - flipconv=kwargs['flip']) + ax = PA.HpxGnomonicAxes( + figure, + extent, + coord=kwargs["coord"], + rot=kwargs["rot"], + format=kwargs["format"], + flipconv=kwargs["flip"], + ) # Suppress warnings about nans - with np.warnings.catch_warnings(): - - np.warnings.filterwarnings('ignore') + # ! 
numpy does not contain the warnings module as of v1.24.3 + # TODO: remove this when numpy is updated + # with np.warnings.catch_warnings(): + # + # np.warnings.filterwarnings('ignore') - img = ax.projmap(hpx_map, - nest=kwargs['nest'], - coord=kwargs['coord'], - vmin=kwargs['min'], - vmax=kwargs['max'], - xsize=kwargs['xsize'], - ysize=kwargs['ysize'], - reso=kwargs['reso'], - cmap=kwargs['cmap'], - norm=kwargs['norm']) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + img = ax.projmap( + hpx_map, + nest=kwargs["nest"], + coord=kwargs["coord"], + vmin=kwargs["min"], + vmax=kwargs["max"], + xsize=kwargs["xsize"], + ysize=kwargs["ysize"], + reso=kwargs["reso"], + cmap=kwargs["cmap"], + norm=kwargs["norm"], + ) return img
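
Note on PATCH 9/9: as the commit message says, numpy no longer exposes a
`warnings` attribute as of v1.24.3 (it had only ever been an accidental
re-export of the stdlib module), so `np.warnings.catch_warnings()` now raises
AttributeError, and the patch swaps in the stdlib `warnings` module directly.
Below is a minimal, self-contained sketch of the same pattern, assuming
numpy >= 1.24; the array and the np.nanmean call are illustrative stand-ins,
not code from this patch:

    import warnings

    import numpy as np

    values = np.array([np.nan, np.nan])

    # Old spelling, broken on numpy >= 1.24:
    #     with np.warnings.catch_warnings(): ...   # AttributeError
    # Supported spelling, same behavior:
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")

        # nanmean over an all-NaN slice normally emits a RuntimeWarning
        # ("Mean of empty slice"); the filter above silences it, just as
        # the patched code silences NaN warnings around ax.projmap().
        result = np.nanmean(values)

    print(result)  # nan, printed without any warning

Since `np.warnings` was the same module object as the stdlib `warnings`, the
substitution does not change which warnings are caught; it only removes the
dependency on the deleted numpy alias.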