astroteam / crbeam_astro_tool: comparison of Generate_figures.py @ 0:f40d05521dca (draft default tip)
planemo upload for repository https://github.com/esg-epfl-apc/tools-astro/tree/main/tools commit de01e3c02a26cd6353a6b9b6f8d1be44de8ccd54
| author | astroteam |
|---|---|
| date | Fri, 25 Apr 2025 19:33:20 +0000 |
| parents | |
| children | |
compared revisions: -1:000000000000 → 0:f40d05521dca
| 1 #!/usr/bin/env python | |
| 2 # coding: utf-8 | |
| 3 | |
| 4 #!/usr/bin/env python | |
| 5 | |
| 6 # This script is generated with nb2galaxy | |
| 7 | |
| 8 # flake8: noqa | |
| 9 | |
| 10 import json | |
| 11 import os | |
| 12 import shutil | |
| 13 | |
| 14 from oda_api.json import CustomJSONEncoder | |
| 15 | |
| 16 # oda:usesResource oda:CRBeamS3 . | |
| 17 # oda:CRBeamS3 a oda:S3 . | |
| 18 # oda:CRBeamS3 oda:resourceBindingEnvVarName "S3_CREDENTIALS" . | |
| 19 | |
| 20 src_name = "NGC 1365" # http://odahub.io/ontology#AstrophysicalObject | |
| 21 | |
| 22 z_start = 0 # http://odahub.io/ontology#Float | |
| 23 Npart = 2000 # http://odahub.io/ontology#Integer ; oda:lower_limit 1 ; oda:upper_limit 100000 | |
| 24 particle_type = "gamma" # http://odahub.io/ontology#String ; oda:allowed_value "gamma","electron","proton" | |
| 25 Emax = 30 # http://odahub.io/ontology#Energy_TeV | |
| 26 Emin = 0.01 # http://odahub.io/ontology#Energy_TeV | |
| 27 EminSource = 1.0 # http://odahub.io/ontology#Energy_TeV | |
| 28 Gamma = 2.0 # http://odahub.io/ontology#Float | |
| 29 EGMF_fG = 10 # http://odahub.io/ontology#Float | |
| 30 lmaxEGMF_Mpc = 5 # http://odahub.io/ontology#Float | |
| 31 jet_half_size = 5.0 # oda:AngleDegrees | |
| 32 jet_direction = 0.0 # oda:AngleDegrees | |
| 33 psf = 1.0 # oda:AngleDegrees | |
| 34 window_size_RA = 2.0 # oda:AngleDegrees | |
| 35 window_size_DEC = 1.0 # oda:AngleDegrees | |
| 36 EBL = "Franceschini 2017" # http://odahub.io/ontology#String ; oda:allowed_value "Franceschini 2017","Stecker 2016 lower limit","Stecker 2016 upper limit","Inoue 2012 Baseline","Inoue 2012 lower limit","Inoue 2012 upper limit" | |
| 37 | |
| 38 _galaxy_wd = os.getcwd() | |
| 39 | |
| 40 with open("inputs.json", "r") as fd: | |
| 41 inp_dic = json.load(fd) | |
| 42 if "C_data_product_" in inp_dic.keys(): | |
| 43 inp_pdic = inp_dic["C_data_product_"] | |
| 44 else: | |
| 45 inp_pdic = inp_dic | |
| 46 src_name = str(inp_pdic["src_name"]) | |
| 47 z_start = float(inp_pdic["z_start"]) | |
| 48 Npart = int(inp_pdic["Npart"]) | |
| 49 particle_type = str(inp_pdic["particle_type"]) | |
| 50 Emax = float(inp_pdic["Emax"]) | |
| 51 Emin = float(inp_pdic["Emin"]) | |
| 52 EminSource = float(inp_pdic["EminSource"]) | |
| 53 Gamma = float(inp_pdic["Gamma"]) | |
| 54 EGMF_fG = float(inp_pdic["EGMF_fG"]) | |
| 55 lmaxEGMF_Mpc = float(inp_pdic["lmaxEGMF_Mpc"]) | |
| 56 jet_half_size = float(inp_pdic["jet_half_size"]) | |
| 57 jet_direction = float(inp_pdic["jet_direction"]) | |
| 58 psf = float(inp_pdic["psf"]) | |
| 59 window_size_RA = float(inp_pdic["window_size_RA"]) | |
| 60 window_size_DEC = float(inp_pdic["window_size_DEC"]) | |
| 61 EBL = str(inp_pdic["EBL"]) | |
| 62 | |
| 63 import os | |
| 64 | |
| 65 from astropy import units as u | |
| 66 from astropy.coordinates import SkyCoord | |
| 67 from astropy.nddata import StdDevUncertainty | |
| 68 from astropy.table import Table | |
| 69 from gammapy.maps import Map, MapAxis | |
| 70 from oda_api.api import ProgressReporter | |
| 71 from oda_api.data_products import ( | |
| 72 LightCurveDataProduct, | |
| 73 NumpyDataProduct, | |
| 74 ODAAstropyTable, | |
| 75 PictureProduct, | |
| 76 ) | |
| 77 from specutils import Spectrum1D | |
| 78 from utils import find_redshift | |
| 79 | |
| 80 if z_start <= 0: | |
| 81 z_start = find_redshift(src_name) | |
| 82 | |
| 83 source = SkyCoord.from_name(src_name, frame="icrs", parse=False, cache=True) | |
| 84 | |
| 85 pr = ProgressReporter() | |
| 86 pr.report_progress( | |
| 87 stage="Initialization", | |
| 88 progress=0, | |
| 89 substage="spectra", | |
| 90 subprogress=30, | |
| 91 message="some message", | |
| 92 ) | |
| 93 | |
| 94 import subprocess | |
| 95 | |
| 96 import light_curve as lc | |
| 97 import numpy as np | |
| 98 from crbeam import CRbeam | |
| 99 from matplotlib import pyplot as plt | |
| 100 from spec_plot import plot_spec | |
| 101 | |
| 102 EGMF = EGMF_fG * 1e-15 | |
| 103 | |
| 104 # internal units are eV | |
| 105 Emax *= 1e12 | |
| 106 Emin *= 1e12 | |
| 107 EminSource *= 1e12 | |
| 108 | |
| 109 background = { | |
| 110 "Franceschini 2017": 12, | |
| 111 "Stecker 2016 lower limit": 10, | |
| 112 "Stecker 2016 upper limit": 11, | |
| 113 "Inoue 2012 Baseline": 3, | |
| 114 "Inoue 2012 lower limit": 4, | |
| 115 "Inoue 2012 upper limit": 5, | |
| 116 }[EBL] | |
| 117 | |
| 118 prog = CRbeam( | |
| 119 z=z_start, | |
| 120 nparticles=Npart, | |
| 121 primary=particle_type, | |
| 122 emax=Emax, | |
| 123 emin=Emin, | |
| 124 emin_source=EminSource, | |
| 125 EGMF=EGMF, | |
| 126 lmaxEGMF=lmaxEGMF_Mpc, | |
| 127 background=background, | |
| 128 ) | |
| 129 cmd = prog.command | |
| 130 cmd | |
| 131 | |
| 132 # One way to call CRbeam without the global cache: | |
| 133 # prog.run(force_overwrite=False) | |
| 134 # Or with the global cache: | |
| 135 # prog.run_cached(overwrite_local_cache=True) | |
| 136 # To clear the S3 cache, run: | |
| 137 # prog.remove_cache() | |
| 138 | |
| 139 # To report progress, the CRbeam run is split into 10 steps | |
| 140 n_steps = 10 | |
| 141 # Initialize multistep simulation | |
| 142 data_exists = not prog.start_multistage_run( | |
| 143 overwrite_local_cache=True, n_steps=n_steps | |
| 144 ) | |
| 145 proceed = not data_exists | |
| 146 | |
| 147 if proceed: | |
| 148 for step in range(n_steps): | |
| 149 pr.report_progress( | |
| 150 stage="Running Simulation", progress=100 * step // n_steps | |
| 151 ) | |
| 152 proceed = prog.simulation_step() | |
| 153 # TODO: report progress using the REST API | |
| 154 pr.report_progress(stage="Running Simulation", progress=100) | |
| 155 | |
| 156 assert not proceed, "must be completed before this cell" | |
| 157 if not data_exists: | |
| 158 prog.end_multistep_run() | |
| 159 | |
| 160 def adjust_weights(mc_file, power): | |
| 161 # convert the weights to mimic the required injection spectrum power (see the reweighting sketch after the listing) | |
| 162 header = "" | |
| 163 with open(mc_file, "rt") as lines: | |
| 164 for line in lines: | |
| 165 if len(line) > 0 and line[0] == "#": | |
| 166 header += line[1:].strip() + "\n" | |
| 167 else: | |
| 168 break | |
| 169 weight_col = 2 | |
| 170 E_src_col = 12 | |
| 171 data = np.loadtxt(mc_file) | |
| 172 weight = data[:, weight_col - 1] | |
| 173 E_src = data[:, E_src_col - 1] | |
| 174 orig_power = ( | |
| 175 prog.power_law | |
| 176 ) # CRBeam is always called with fixed power=1 to optimize cache usage | |
| 177 weight *= np.power(E_src / Emax, -(power - orig_power)) | |
| 178 output_file = f"{mc_file}_p{power}" | |
| 179 np.savetxt(output_file, data, header=header.strip(), fmt="%.6g") | |
| 180 return output_file | |
| 181 | |
| 182 # this call would not rerun the program, since the files already exist on the server | |
| 183 # prog.run(force_overwrite=False) | |
| 184 | |
| 185 # the cache can be uploaded explicitly by calling | |
| 186 # prog.upload_cache() | |
| 187 | |
| 188 pr.report_progress(stage="Building Plots and Tables", progress=0) | |
| 189 | |
| 190 print(prog.output_path) | |
| 191 get_ipython().system("ls {prog.output_path}") # noqa: F821 | |
| 192 | |
| 193 mc_file = prog.output_path + "/photon" | |
| 194 | |
| 195 # preview the first rows of the data | |
| 196 get_ipython().system("cat {mc_file} | awk 'NR<=5'") # noqa: F821 | |
| 197 | |
| 198 if Emax != EminSource: | |
| 199 mc_file = adjust_weights(mc_file, Gamma) | |
| 200 | |
| 201 # preview the first rows of the (possibly reweighted) data | |
| 202 get_ipython().system("cat {mc_file} | awk 'NR<=5'") # noqa: F821 | |
| 203 | |
| 204 subprocess.run( | |
| 205 [ | |
| 206 "bash", | |
| 207 os.path.join(os.environ.get("BASEDIR", os.getcwd()), "makeSpecE2.sh"), | |
| 208 mc_file, | |
| 209 ] | |
| 210 ) | |
| 211 | |
| 212 # rotating the beam | |
| 213 | |
| 214 if EGMF > 0: | |
| 215 from mc_rotate import mc_rotate | |
| 216 | |
| 217 mc_rotated_file = mc_rotate( | |
| 218 mc_file, jet_half_size, jet_direction, psf_deg=psf | |
| 219 ) | |
| 220 else: | |
| 221 mc_rotated_file = mc_file | |
| 222 mc_rotated_file | |
| 223 | |
| 224 subprocess.run( | |
| 225 [ | |
| 226 "bash", | |
| 227 os.path.join(os.environ.get("BASEDIR", os.getcwd()), "makeSpecE2.sh"), | |
| 228 mc_rotated_file, | |
| 229 ] | |
| 230 ) | |
| 231 | |
| 232 # preview the first rows of the rotated data | |
| 233 get_ipython().system("cat {mc_rotated_file} | awk 'NR<=5'") # noqa: F821 | |
| 234 | |
| 235 # reading the source distance in Mpc from the data file | |
| 236 T_Mpc = lc.get_distance_Mpc(mc_file) | |
| 237 T_Mpc | |
| 238 | |
| 239 # building the spectrum figure for total flux | |
| 240 | |
| 241 spec_file = mc_file + ".spec" | |
| 242 spec_fig = plot_spec( | |
| 243 spec_file, title="spectrum", ext="png", show=False, logscale=True | |
| 244 ) | |
| 245 spec_image = PictureProduct.from_file(spec_fig) | |
| 246 | |
| 247 # building the spectrum figure for the flux within PSF | |
| 248 spec_rotated_file = mc_rotated_file + ".spec" | |
| 249 spec_rotated_fig = plot_spec( | |
| 250 spec_rotated_file, | |
| 251 title="spectrum", | |
| 252 Emin=Emin, | |
| 253 Emax=Emax, | |
| 254 ext="png", | |
| 255 show=False, | |
| 256 logscale=True, | |
| 257 ) | |
| 258 spec_rotated_image = PictureProduct.from_file(spec_rotated_fig) | |
| 259 | |
| 260 spec_rotated_file | |
| 261 | |
| 262 lc_params = dict( | |
| 263 logscale=True, | |
| 264 max_t=-99, # 8760000, | |
| 265 n_points=30, | |
| 266 psf=psf, | |
| 267 min_n_particles=10, | |
| 268 cut_0=True, | |
| 269 ) | |
| 270 | |
| 271 # building the light curve figure | |
| 272 if EGMF > 0: | |
| 273 light_curve_fig = lc.make_plot(mc_rotated_file, **lc_params) | |
| 274 light_curve_image = PictureProduct.from_file(light_curve_fig) | |
| 275 else: | |
| 276 # fall back to the spectrum figure to avoid problems with a missing light-curve figure | |
| 277 light_curve_image = PictureProduct.from_file(spec_fig) | |
| 278 | |
| 279 l_curve = None | |
| 280 if EGMF > 0: | |
| 281 delay, weights = lc.get_counts(mc_rotated_file, **lc_params) | |
| 282 t, f, N = lc.light_curve(delay, weights, **lc_params) | |
| 283 l_curve = LightCurveDataProduct.from_arrays( | |
| 284 times=t * 3600.0, # t is in hours; convert to seconds | |
| 285 fluxes=f, | |
| 286 errors=f / np.sqrt(N), | |
| 287 time_format="unix", | |
| 288 ) | |
| 289 | |
| 290 def convert_to_ICRS(phi: np.array, theta: np.array, source: SkyCoord): | |
| 291 # the prime system has its z-axis pointing towards the source and is centered at the observer | |
| 292 # it is obtained by rotating the source system by 180 degrees about the x-axis | |
| 293 # prime-system coordinates carry the suffix "prime" (') | |
| 294 # ICRS coordinates carry no suffix | |
| 295 # TODO: add a parameter ic_jet_plane_direction: SkyCoord giving the plane that contains the jet | |
| 296 # by definition, in the prime system the jet lies in the y'-z' plane and the z' axis points from the source towards the observer | |
| 297 # for simplicity we first assume that the prime frame is oriented as the ICRS frame (use SkyOffsetFrame with rotation=0); a consistency-check sketch follows the listing | |
| 298 | |
| 299 # coordinates of unit direction vector in prime system | |
| 300 | |
| 301 # direction of event arrival at prime system | |
| 302 z_prime = np.cos( | |
| 303 theta / 180.0 * np.pi | |
| 304 ) # (-1)*(-1) = 1 (rotating the system and taking the opposite vector) | |
| 305 r_xy_prime = np.sin(theta / 180.0 * np.pi) | |
| 306 x_prime = -r_xy_prime * np.cos( | |
| 307 phi / 180.0 * np.pi | |
| 308 ) # (1)*(-1) = -1 (rotating the system and taking the opposite vector) | |
| 309 y_prime = r_xy_prime * np.sin( | |
| 310 phi / 180.0 * np.pi | |
| 311 ) # (-1)*(-1) = 1 (rotating the system and taking the opposite vector) | |
| 312 | |
| 313 print("x',y',z' =", x_prime, y_prime, z_prime) | |
| 314 | |
| 315 # angle between z and z' axes | |
| 316 delta1 = 90 * u.deg - source.dec | |
| 317 | |
| 318 print("source:", source.cartesian) | |
| 319 print("delta1 = ", delta1) | |
| 320 | |
| 321 # rotating about the y-axis | |
| 322 | |
| 323 x1 = x_prime * np.cos(delta1) + z_prime * np.sin(delta1) | |
| 324 y1 = y_prime | |
| 325 z1 = -x_prime * np.sin(delta1) + z_prime * np.cos(delta1) | |
| 326 | |
| 327 print("step 1: x,y,z =", x1, y1, z1) | |
| 328 | |
| 329 # rotation by -RA about the z-axis | |
| 330 delta2 = source.ra | |
| 331 x = x1 * np.cos(delta2) - y1 * np.sin(delta2) | |
| 332 y = x1 * np.sin(delta2) + y1 * np.cos(delta2) | |
| 333 z = z1 | |
| 334 | |
| 335 print("x,y,z =", x, y, z) | |
| 336 | |
| 337 event_coords = SkyCoord( | |
| 338 x=x, y=y, z=z, frame="icrs", representation_type="cartesian" | |
| 339 ).spherical | |
| 340 # print(event_coords.spherical) | |
| 341 return event_coords | |
| 342 | |
| 343 def LoadCubeTemplate( | |
| 344 mc_file: str, | |
| 345 source: SkyCoord, | |
| 346 Emax=1e15, | |
| 347 Emin=1e9, | |
| 348 bins_per_decade=20, | |
| 349 binsz=0.02, | |
| 350 theta_mult=1, | |
| 351 sec_component_mult=1, | |
| 352 remove_straight=False, | |
| 353 symmetric_split=0, | |
| 354 ): | |
| 355 pass | |
| 356 | |
| 357 mc_data = np.loadtxt(mc_file) | |
| 358 E = mc_data[:, 0] | |
| 359 print(f"dataset energy: {np.min(E)} <= E/eV <= {np.max(E)}") | |
| 360 Theta = mc_data[:, 2] * theta_mult # theta_mult is used for debug only | |
| 361 print(f"dataset Theta: {np.min(Theta)} <= Theta <= {np.max(Theta)}") | |
| 362 # idx = np.where((E >= Emin) & (E<=Emax) & (Theta<theta_max))[0] | |
| 363 print(f"Filters: {Emin} <= E/eV <= {Emax}") | |
| 364 condition = (E >= Emin) & (E <= Emax) | |
| 365 if remove_straight: | |
| 366 condition &= Theta > 0 | |
| 367 | |
| 368 idx = np.where(condition)[0] | |
| 369 | |
| 370 print("filtered dataset length:", len(idx)) | |
| 371 | |
| 372 E = E[idx] | |
| 373 Theta = Theta[idx] | |
| 374 w = mc_data[:, 1][idx] | |
| 375 Phi = mc_data[:, 3][idx] | |
| 376 # t = mc_data[:,5][idx] | |
| 377 # t *= time_scale_hours | |
| 378 if sec_component_mult != 1: # sec_component_mult is used for debug only | |
| 379 print("WARNING: Sec component multiplicity:", sec_component_mult) | |
| 380 w[Theta > 0] *= sec_component_mult | |
| 381 | |
| 382 if len(idx) > 0: | |
| 383 # print(f'{np.min(t)} <= t/h <= {np.max(t)}') | |
| 384 print(f"{np.min(E)} <= E <= {np.max(E)}") | |
| 385 | |
| 386 energy_axis_name = "energy_true" | |
| 387 | |
| 388 energy_axis = MapAxis.from_energy_bounds( | |
| 389 Emin * u.eV, | |
| 390 Emax * u.eV, | |
| 391 nbin=bins_per_decade, | |
| 392 name=energy_axis_name, | |
| 393 per_decade=True, | |
| 394 ) | |
| 395 | |
| 396 m_cube = Map.create( | |
| 397 binsz=binsz, | |
| 398 width=(window_size_RA * u.deg, window_size_DEC * u.deg), | |
| 399 frame="icrs", | |
| 400 axes=[energy_axis], | |
| 401 skydir=SkyCoord(source), | |
| 402 ) | |
| 403 | |
| 404 print(m_cube.geom) | |
| 405 | |
| 406 if len(idx) > 0: | |
| 407 if symmetric_split > 1: | |
| 408 Theta = np.repeat(Theta, symmetric_split) | |
| 409 E = np.repeat(E, symmetric_split) | |
| 410 w = np.repeat(w, symmetric_split) / symmetric_split | |
| 411 Phi = np.random.random(len(Theta)) * 360 | |
| 412 | |
| 413 sc = convert_to_ICRS(Phi, Theta, source) | |
| 414 m_cube.fill_by_coord( | |
| 415 {"lat": sc.lat, "lon": sc.lon, energy_axis_name: E * u.eV}, | |
| 416 weights=w, | |
| 417 ) | |
| 418 | |
| 419 return m_cube | |
| 420 | |
| 421 def LoadTemplate4D( | |
| 422 mc_file: str, | |
| 423 source: SkyCoord, | |
| 424 redshift, | |
| 425 Emax=1e15, | |
| 426 Emin=1e9, | |
| 427 bins_per_decade=20, | |
| 428 binsz=0.02, | |
| 429 theta_mult=1, | |
| 430 max_time_quantile=1.0, | |
| 431 n_time_bins=100, | |
| 432 symmetric_split=0, | |
| 433 ): | |
| 434 from astropy.cosmology import FlatLambdaCDM | |
| 435 | |
| 436 cosmo = FlatLambdaCDM( | |
| 437 H0=67.8 * u.km / u.s / u.Mpc, Tcmb0=2.725 * u.K, Om0=(1 - 0.692) | |
| 438 ) | |
| 439 time_scale_hours = float( | |
| 440 ((cosmo.age(0) - cosmo.age(z_start)) / u.h).decompose() | |
| 441 ) | |
| 442 | |
| 443 mc_data = np.loadtxt(mc_file) | |
| 444 E = mc_data[:, 0] | |
| 445 | |
| 446 print("dataset length:", len(mc_data)) | |
| 447 print(f"dataset energy: {np.min(E)} <= E/eV <= {np.max(E)}") | |
| 448 Theta = mc_data[:, 2] * theta_mult # theta_mult is used for debug only | |
| 449 print(f"dataset Theta: {np.min(Theta)} <= Theta <= {np.max(Theta)}") | |
| 450 # idx = np.where((E >= Emin) & (E<=Emax) & (Theta<theta_max))[0] | |
| 451 | |
| 452 idx = np.where((E >= Emin) & (E <= Emax))[0] | |
| 453 | |
| 454 print(f"Filters: {Emin} <= E/eV <= {Emax}") | |
| 455 print("filtered dataset length:", len(idx)) | |
| 456 | |
| 457 E = E[idx] | |
| 458 Theta = Theta[idx] | |
| 459 w = mc_data[:, 1][idx] | |
| 460 Phi = mc_data[:, 3][idx] | |
| 461 t = mc_data[:, 5][idx] | |
| 462 t *= time_scale_hours | |
| 463 min_time = np.min(t) | |
| 464 if max_time_quantile >= 1.0: | |
| 465 max_time = np.max(t) | |
| 466 else: | |
| 467 max_time = np.quantile(t, max_time_quantile) | |
| 468 | |
| 469 if max_time == min_time: | |
| 470 max_time = min_time + 1e-10 | |
| 471 | |
| 472 idx = np.where(t <= max_time)[0] | |
| 473 print(f"Filters: {Emin} <= E/eV <= {Emax}, t/h <= {max_time}") | |
| 474 print("filtered dataset length:", len(idx)) | |
| 475 | |
| 476 E = E[idx] | |
| 477 Theta = Theta[idx] | |
| 478 w = w[idx] | |
| 479 Phi = Phi[idx] | |
| 480 t = t[idx] | |
| 481 | |
| 482 energy_axis_name = "energy_true" | |
| 483 | |
| 484 energy_axis = MapAxis.from_energy_bounds( | |
| 485 Emin * u.eV, | |
| 486 Emax * u.eV, | |
| 487 nbin=bins_per_decade, | |
| 488 name=energy_axis_name, | |
| 489 per_decade=True, | |
| 490 ) | |
| 491 | |
| 492 from astropy.time import Time | |
| 493 | |
| 494 reference_time = Time(0, format="unix") | |
| 495 | |
| 496 delta = max_time - min_time | |
| 497 time_edges = (min_time + delta * np.linspace(0, 1, n_time_bins + 1)) * u.h # only needed by the commented-out TimeMapAxis variants below | |
| 498 # time_axis = TimeMapAxis.from_time_edges( | |
| 499 # time_min=time_edges[:-1], | |
| 500 # time_max=time_edges[1:], | |
| 501 # interp="lin", | |
| 502 # unit=u.h, | |
| 503 # name='time', | |
| 504 # reference_time=reference_time | |
| 505 # ) | |
| 506 | |
| 507 time_axis = MapAxis.from_bounds( | |
| 508 min_time, max_time, n_time_bins, name="time", unit=u.h | |
| 509 ) | |
| 510 | |
| 511 # time_axis = TimeMapAxis(time_edges[:-1], time_edges[1:], reference_time, name='time', interp='lin') | |
| 512 | |
| 513 # time_axis = TimeMapAxis.from_time_bounds(min_time, max_time, n_time_bins, unit=u.h, name='time') | |
| 514 | |
| 515 map4d = Map.create( | |
| 516 binsz=binsz, | |
| 517 width=(window_size_RA * u.deg, window_size_DEC * u.deg), | |
| 518 frame="icrs", | |
| 519 axes=[energy_axis, time_axis], | |
| 520 skydir=SkyCoord(source), | |
| 521 ) | |
| 522 | |
| 523 print(map4d.geom) | |
| 524 | |
| 525 if len(idx) > 0: | |
| 526 if symmetric_split > 1: | |
| 527 Theta = np.repeat(Theta, symmetric_split) | |
| 528 E = np.repeat(E, symmetric_split) | |
| 529 t = np.repeat(t, symmetric_split) | |
| 530 w = np.repeat(w, symmetric_split) / symmetric_split | |
| 531 Phi = np.random.random(len(Theta)) * 360 | |
| 532 | |
| 533 sc = convert_to_ICRS(Phi, Theta, source) | |
| 534 map4d.fill_by_coord( | |
| 535 { | |
| 536 "lat": sc.lat, | |
| 537 "lon": sc.lon, | |
| 538 energy_axis_name: E * u.eV, | |
| 539 "time": t * u.h, | |
| 540 }, | |
| 541 weights=w, | |
| 542 ) | |
| 543 | |
| 544 return map4d | |
| 545 | |
| 546 bins_per_decade = 20 | |
| 547 | |
| 548 symmetric_split = 0 if jet_direction != 0 else 100 | |
| 549 | |
| 550 map3d = LoadCubeTemplate( | |
| 551 mc_rotated_file, | |
| 552 source=source, | |
| 553 Emin=Emin, | |
| 554 Emax=Emax, | |
| 555 bins_per_decade=bins_per_decade, | |
| 556 symmetric_split=symmetric_split, | |
| 557 ) | |
| 558 | |
| 559 map3d.write("map3d.fits", overwrite=True) | |
| 560 | |
| 561 map4d = LoadTemplate4D( | |
| 562 mc_rotated_file, | |
| 563 source=source, | |
| 564 redshift=z_start, | |
| 565 Emin=Emin, | |
| 566 Emax=Emax, | |
| 567 bins_per_decade=bins_per_decade, | |
| 568 symmetric_split=symmetric_split, | |
| 569 ) | |
| 570 map4d.write("map4d.fits", overwrite=True) | |
| 571 | |
| 572 d = np.genfromtxt(spec_file) | |
| 573 ee = d[:, 0] | |
| 574 ff = d[:, 1] | |
| 575 ff_err = ff / np.sqrt(d[:, 2]) | |
| 576 plt.errorbar(ee, ff, yerr=ff_err, label="total flux") | |
| 577 d = np.genfromtxt(spec_rotated_file) | |
| 578 ee1 = d[:, 0] | |
| 579 ff1 = d[:, 1] | |
| 580 ff_err1 = ff1 / np.sqrt(d[:, 2]) | |
| 581 plt.errorbar(ee1, ff1, yerr=ff_err1, label="PSF flux") | |
| 582 plt.xscale("log") | |
| 583 plt.yscale("log") | |
| 584 plt.xlabel("Energy, eV") | |
| 585 plt.ylabel("$E^2dN/dE$, arbitrary units") | |
| 586 plt.legend(loc="lower right") | |
| 587 plt.savefig("Spectrum.png", format="png", bbox_inches="tight") | |
| 588 | |
| 589 bin_image = PictureProduct.from_file("Spectrum.png") | |
| 590 | |
| 591 data = [ee, ff, ff_err] | |
| 592 names = ("Energy", "Total_flux", "Total_flux_err") | |
| 593 table = ODAAstropyTable(Table(data, names=names)) | |
| 594 data = [ee1, ff1, ff_err1] | |
| 595 names = ("Energy", "PSF_flux", "PSF_flux_err") | |
| 596 table1 = ODAAstropyTable(Table(data, names=names)) | |
| 597 | |
| 598 flux_unit = u.eV / u.cm / u.cm / u.s / u.sr | |
| 599 spec = Spectrum1D( | |
| 600 spectral_axis=ee * u.eV, | |
| 601 flux=ff * flux_unit, | |
| 602 uncertainty=StdDevUncertainty(ff_err), | |
| 603 ) | |
| 604 spec.write("spec.fits", overwrite=True) | |
| 605 dp = NumpyDataProduct.from_fits_file("spec.fits") | |
| 606 spec_rotated = Spectrum1D( | |
| 607 spectral_axis=ee1 * u.eV, | |
| 608 flux=ff1 * flux_unit, | |
| 609 uncertainty=StdDevUncertainty(ff_err1), | |
| 610 ) | |
| 611 spec_rotated.write("spec_rotated.fits", overwrite=True) | |
| 612 dp_rotated = NumpyDataProduct.from_fits_file("spec_rotated.fits") | |
| 613 map3d = NumpyDataProduct.from_fits_file("map3d.fits") | |
| 614 map4d = NumpyDataProduct.from_fits_file("map4d.fits") | |
| 615 | |
| 616 pr.report_progress(stage="Building Plots and Tables", progress=100) | |
| 617 | |
| 618 spectrum_png = bin_image # http://odahub.io/ontology#ODAPictureProduct | |
| 619 light_curve_png = ( | |
| 620 light_curve_image # http://odahub.io/ontology#ODAPictureProduct | |
| 621 ) | |
| 622 total_spectrum_table = table # http://odahub.io/ontology#ODAAstropyTable | |
| 623 psf_spectrum_table = table1 # http://odahub.io/ontology#ODAAstropyTable | |
| 624 lc_result = l_curve # http://odahub.io/ontology#LightCurve | |
| 625 spectrum = dp # http://odahub.io/ontology#Spectrum | |
| 626 spectrum_rotated = dp_rotated # http://odahub.io/ontology#Spectrum | |
| 627 map3d = map3d # http://odahub.io/ontology#Spectrum | |
| 628 map4d = map4d # http://odahub.io/ontology#Spectrum | |
| 629 | |
| 630 # spec_png = spec_image # http://odahub.io/ontology#ODAPictureProduct | |
| 631 # spec_rotated_png = spec_rotated_image # http://odahub.io/ontology#ODAPictureProduct | |
| 632 | |
| 633 # output gathering | |
| 634 _galaxy_meta_data = {} | |
| 635 _oda_outs = [] | |
| 636 _oda_outs.append( | |
| 637 ( | |
| 638 "out_Generate_figures_spectrum_png", | |
| 639 "spectrum_png_galaxy.output", | |
| 640 spectrum_png, | |
| 641 ) | |
| 642 ) | |
| 643 _oda_outs.append( | |
| 644 ( | |
| 645 "out_Generate_figures_light_curve_png", | |
| 646 "light_curve_png_galaxy.output", | |
| 647 light_curve_png, | |
| 648 ) | |
| 649 ) | |
| 650 _oda_outs.append( | |
| 651 ( | |
| 652 "out_Generate_figures_total_spectrum_table", | |
| 653 "total_spectrum_table_galaxy.output", | |
| 654 total_spectrum_table, | |
| 655 ) | |
| 656 ) | |
| 657 _oda_outs.append( | |
| 658 ( | |
| 659 "out_Generate_figures_psf_spectrum_table", | |
| 660 "psf_spectrum_table_galaxy.output", | |
| 661 psf_spectrum_table, | |
| 662 ) | |
| 663 ) | |
| 664 _oda_outs.append( | |
| 665 ("out_Generate_figures_lc_result", "lc_result_galaxy.output", lc_result) | |
| 666 ) | |
| 667 _oda_outs.append( | |
| 668 ("out_Generate_figures_spectrum", "spectrum_galaxy.output", spectrum) | |
| 669 ) | |
| 670 _oda_outs.append( | |
| 671 ( | |
| 672 "out_Generate_figures_spectrum_rotated", | |
| 673 "spectrum_rotated_galaxy.output", | |
| 674 spectrum_rotated, | |
| 675 ) | |
| 676 ) | |
| 677 _oda_outs.append(("out_Generate_figures_map3d", "map3d_galaxy.output", map3d)) | |
| 678 _oda_outs.append(("out_Generate_figures_map4d", "map4d_galaxy.output", map4d)) | |
| 679 | |
| 680 for _outn, _outfn, _outv in _oda_outs: | |
| 681 _galaxy_outfile_name = os.path.join(_galaxy_wd, _outfn) | |
| 682 if isinstance(_outv, str) and os.path.isfile(_outv): | |
| 683 shutil.move(_outv, _galaxy_outfile_name) | |
| 684 _galaxy_meta_data[_outn] = {"ext": "_sniff_"} | |
| 685 elif getattr(_outv, "write_fits_file", None): | |
| 686 _outv.write_fits_file(_galaxy_outfile_name) | |
| 687 _galaxy_meta_data[_outn] = {"ext": "fits"} | |
| 688 elif getattr(_outv, "write_file", None): | |
| 689 _outv.write_file(_galaxy_outfile_name) | |
| 690 _galaxy_meta_data[_outn] = {"ext": "_sniff_"} | |
| 691 else: | |
| 692 with open(_galaxy_outfile_name, "w") as fd: | |
| 693 json.dump(_outv, fd, cls=CustomJSONEncoder) | |
| 694 _galaxy_meta_data[_outn] = {"ext": "json"} | |
| 695 | |
| 696 with open(os.path.join(_galaxy_wd, "galaxy.json"), "w") as fd: | |
| 697 json.dump(_galaxy_meta_data, fd) | |
| 698 print("*** Job finished successfully ***") |
