changeset 0:f40d05521dca draft default tip

planemo upload for repository https://github.com/esg-epfl-apc/tools-astro/tree/main/tools commit de01e3c02a26cd6353a6b9b6f8d1be44de8ccd54
author astroteam
date Fri, 25 Apr 2025 19:33:20 +0000
parents
children
files Generate_events.py Generate_figures.py cache.py crbeam.py crbeam_astro_tool.xml light_curve.py makeSpecE2.sh mc_rotate.py spec_plot.py utils.py
diffstat 10 files changed, 2129 insertions(+), 0 deletions(-) [+]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Generate_events.py	Fri Apr 25 19:33:20 2025 +0000
@@ -0,0 +1,249 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# This script is generated with nb2galaxy
+
+# flake8: noqa
+
+import json
+import os
+import shutil
+
+from oda_api.json import CustomJSONEncoder
+
+# oda:usesResource oda:CRBeamS3 .
+# oda:CRBeamS3 a oda:S3 .
+# oda:CRBeamS3 oda:resourceBindingEnvVarName "S3_CREDENTIALS" .
+
+src_name = "1ES 1215+303"  # http://odahub.io/ontology#AstrophysicalObject
+
+z_start = 0.13  # http://odahub.io/ontology#Float
+Npart = 10000  # http://odahub.io/ontology#Integer ; oda:lower_limit 1 ; oda:upper_limit 100000
+particle_type = "gamma"  # http://odahub.io/ontology#String ; oda:allowed_value "gamma","electron","proton"
+Emax = 50  # http://odahub.io/ontology#Energy_TeV
+Emin = 0.01  # http://odahub.io/ontology#Energy_TeV
+EminSource = 0.01  # http://odahub.io/ontology#Energy_TeV
+Gamma = 2.0  # http://odahub.io/ontology#Float
+EGMF_fG = 100  # http://odahub.io/ontology#Float
+lmaxEGMF_Mpc = 5  # http://odahub.io/ontology#Float
+jet_half_size = 180.0  # http://odahub.io/ontology#AngleDegrees
+jet_direction = 0.0  # http://odahub.io/ontology#AngleDegrees
+psf = 180.0  # http://odahub.io/ontology#AngleDegrees
+window_size_RA = 4.0  # http://odahub.io/ontology#AngleDegrees
+window_size_DEC = 4.0  # http://odahub.io/ontology#AngleDegrees
+EBL = "Franceschini 2017"  # http://odahub.io/ontology#String ; oda:allowed_value "Franceschini 2017","Stecker 2016 lower limit","Stecker 2016 upper limit","Inoue 2012 Baseline","Inoue 2012 lower limit","Inoue 2012 upper limit"
+
+_galaxy_wd = os.getcwd()
+
+with open("inputs.json", "r") as fd:
+    inp_dic = json.load(fd)
+if "C_data_product_" in inp_dic.keys():
+    inp_pdic = inp_dic["C_data_product_"]
+else:
+    inp_pdic = inp_dic
+src_name = str(inp_pdic["src_name"])
+z_start = float(inp_pdic["z_start"])
+Npart = int(inp_pdic["Npart"])
+particle_type = str(inp_pdic["particle_type"])
+Emax = float(inp_pdic["Emax"])
+Emin = float(inp_pdic["Emin"])
+EminSource = float(inp_pdic["EminSource"])
+Gamma = float(inp_pdic["Gamma"])
+EGMF_fG = float(inp_pdic["EGMF_fG"])
+lmaxEGMF_Mpc = float(inp_pdic["lmaxEGMF_Mpc"])
+jet_half_size = float(inp_pdic["jet_half_size"])
+jet_direction = float(inp_pdic["jet_direction"])
+psf = float(inp_pdic["psf"])
+window_size_RA = float(inp_pdic["window_size_RA"])
+window_size_DEC = float(inp_pdic["window_size_DEC"])
+EBL = str(inp_pdic["EBL"])
+
+from astropy.coordinates import SkyCoord
+from astropy.table import Table
+from oda_api.api import ProgressReporter
+from oda_api.data_products import ODAAstropyTable
+from utils import find_redshift
+
+if z_start <= 0:
+    z_start = find_redshift(src_name)
+
+source = SkyCoord.from_name(src_name, frame="icrs", parse=False, cache=True)
+
+pr = ProgressReporter()
+pr.report_progress(
+    stage="Initialization",
+    progress=0,
+    substage="spectra",
+    subprogress=30,
+    message="some message",
+)
+
+import light_curve as lc
+import numpy as np
+from crbeam import CRbeam
+
+EGMF = EGMF_fG * 1e-15
+
+# internal units are eV
+Emax *= 1e12
+Emin *= 1e12
+EminSource *= 1e12
+
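+# map EBL model names to the numeric model codes passed to the crbeam
+# binary via its "background" option (codes as used by the CRbeam wrapper)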
+background = {
+    "Franceschini 2017": 12,
+    "Stecker 2016 lower limit": 10,
+    "Stecker 2016 upper limit": 11,
+    "Inoue 2012 Baseline": 3,
+    "Inoue 2012 lower limit": 4,
+    "Inoue 2012 upper limit": 5,
+}[EBL]
+
+prog = CRbeam(
+    z=z_start,
+    nparticles=Npart,
+    primary=particle_type,
+    emax=Emax,
+    emin=Emin,
+    emin_source=EminSource,
+    EGMF=EGMF,
+    lmaxEGMF=lmaxEGMF_Mpc,
+    background=background,
+)
+cmd = prog.command
+print(cmd)
+
+# One way to call CRbeam without the global cache:
+# prog.run(force_overwrite=False)
+# Here we call CRbeam with the global cache:
+# prog.run_cached(overwrite_local_cache=True)
+# To clear the s3 cache, run:
+# prog.remove_cache()
+
+# To report progress, we split the CRbeam run into 10 parts
+n_steps = 10
+# Initialize multistep simulation
+data_exists = not prog.start_multistage_run(
+    overwrite_local_cache=True, n_steps=n_steps
+)
+proceed = not data_exists
+
+if proceed:
+    for step in range(n_steps):
+        pr.report_progress(
+            stage="Running Simulation", progress=100 * step // n_steps
+        )
+        proceed = prog.simulation_step()
+        # todo: report progress using rest API
+    pr.report_progress(stage="Running Simulation", progress=100)
+
+assert not proceed, "must be completed before this cell"
+if not data_exists:
+    prog.end_multistep_run()
+
+def adjust_weights(mc_file, power):
+    # convert weights to mimic the required injection spectrum power
+    header = ""
+    with open(mc_file, "rt") as lines:
+        for line in lines:
+            if len(line) > 0 and line[0] == "#":
+                header += line[1:].strip() + "\n"
+            else:
+                break
+    weight_col = 2
+    E_src_col = 12
+    data = np.loadtxt(mc_file)
+    weight = data[:, weight_col - 1]
+    E_src = data[:, E_src_col - 1]
+    orig_power = (
+        prog.power_law
+    )  # CRBeam is always called with fixed power=1 to optimize cache usage
+    weight *= np.power(E_src / Emax, -(power - orig_power))
+    output_file = f"{mc_file}_p{power}"
+    np.savetxt(output_file, data, header=header.strip(), fmt="%.6g")
+    return output_file
+
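+# Worked example of the reweighting above (illustrative): the cached events
+# are drawn from an E^-1 spectrum (orig_power = 1); to mimic Gamma = 2, an
+# event with E_src = 0.5 * Emax gets its weight multiplied by
+# (E_src / Emax) ** -(2 - 1) = 0.5 ** -1 = 2, boosting low-energy events.
+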
+# this code will not execute the program since the files already exist on the server
+# prog.run(force_overwrite=False)
+
+# one can upload the cache explicitly by calling
+# prog.upload_cache()
+
+pr.report_progress(stage="Building Plots and Tables", progress=0)
+
+print(prog.output_path)
+get_ipython().system("ls {prog.output_path}")   # noqa: F821
+
+mc_file = prog.output_path + "/photon"
+
+if Emax != EminSource:
+    mc_file = adjust_weights(mc_file, Gamma)
+
+# rotating the beam
+
+if EGMF > 0:
+    from mc_rotate import mc_rotate
+
+    mc_rotated_file = mc_rotate(
+        mc_file, jet_half_size, jet_direction, psf_deg=psf
+    )
+else:
+    mc_rotated_file = mc_file
+print(mc_rotated_file)
+
+# reading the source distance in Mpc from the data file
+T_Mpc = lc.get_distance_Mpc(mc_file)
+print(T_Mpc)
+
+d = np.genfromtxt(mc_rotated_file, skip_header=3)
+# E/eV,weight,Theta,Phi,dT_raw/T,dT_calc/T,z_cascade_production,N_interactions,z_src,E_src/eV,z_prod
+names = (
+    "E/eV",
+    "weight",
+    "Theta",
+    "Phi",
+    "dT_raw/T",
+    "dT_calc/T",
+    "z_cascade_production",
+    "N_interactions",
+    "z_src",
+    "E_src/eV",
+    "z_prod",
+)
+res = ODAAstropyTable(Table(d, names=names))
+
+pr.report_progress(stage="Building Plots and Tables", progress=100)
+
+Event_file = res  # http://odahub.io/ontology#ODAAstropyTable
+
+# output gathering
+_galaxy_meta_data = {}
+_oda_outs = []
+_oda_outs.append(
+    ("out_Generate_events_Event_file", "Event_file_galaxy.output", Event_file)
+)
+
+for _outn, _outfn, _outv in _oda_outs:
+    _galaxy_outfile_name = os.path.join(_galaxy_wd, _outfn)
+    if isinstance(_outv, str) and os.path.isfile(_outv):
+        shutil.move(_outv, _galaxy_outfile_name)
+        _galaxy_meta_data[_outn] = {"ext": "_sniff_"}
+    elif getattr(_outv, "write_fits_file", None):
+        _outv.write_fits_file(_galaxy_outfile_name)
+        _galaxy_meta_data[_outn] = {"ext": "fits"}
+    elif getattr(_outv, "write_file", None):
+        _outv.write_file(_galaxy_outfile_name)
+        _galaxy_meta_data[_outn] = {"ext": "_sniff_"}
+    else:
+        with open(_galaxy_outfile_name, "w") as fd:
+            json.dump(_outv, fd, cls=CustomJSONEncoder)
+        _galaxy_meta_data[_outn] = {"ext": "json"}
+
+with open(os.path.join(_galaxy_wd, "galaxy.json"), "w") as fd:
+    json.dump(_galaxy_meta_data, fd)
+print("*** Job finished successfully ***")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Generate_figures.py	Fri Apr 25 19:33:20 2025 +0000
@@ -0,0 +1,698 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+# This script is generated with nb2galaxy
+
+# flake8: noqa
+
+import json
+import os
+import shutil
+
+from oda_api.json import CustomJSONEncoder
+
+# oda:usesResource oda:CRBeamS3 .
+# oda:CRBeamS3 a oda:S3 .
+# oda:CRBeamS3 oda:resourceBindingEnvVarName "S3_CREDENTIALS" .
+
+src_name = "NGC 1365"  # http://odahub.io/ontology#AstrophysicalObject
+
+z_start = 0  # http://odahub.io/ontology#Float
+Npart = 2000  # http://odahub.io/ontology#Integer ; oda:lower_limit 1 ; oda:upper_limit 100000
+particle_type = "gamma"  # http://odahub.io/ontology#String ; oda:allowed_value "gamma","electron","proton"
+Emax = 30  # http://odahub.io/ontology#Energy_TeV
+Emin = 0.01  # http://odahub.io/ontology#Energy_TeV
+EminSource = 1.0  # http://odahub.io/ontology#Energy_TeV
+Gamma = 2.0  # http://odahub.io/ontology#Float
+EGMF_fG = 10  # http://odahub.io/ontology#Float
+lmaxEGMF_Mpc = 5  # http://odahub.io/ontology#Float
+jet_half_size = 5.0  # oda:AngleDegrees
+jet_direction = 0.0  # oda:AngleDegrees
+psf = 1.0  # oda:AngleDegrees
+window_size_RA = 2.0  # oda:AngleDegrees
+window_size_DEC = 1.0  # oda:AngleDegrees
+EBL = "Franceschini 2017"  # http://odahub.io/ontology#String ; oda:allowed_value "Franceschini 2017","Stecker 2016 lower limit","Stecker 2016 upper limit","Inoue 2012 Baseline","Inoue 2012 lower limit","Inoue 2012 upper limit"
+
+_galaxy_wd = os.getcwd()
+
+with open("inputs.json", "r") as fd:
+    inp_dic = json.load(fd)
+if "C_data_product_" in inp_dic.keys():
+    inp_pdic = inp_dic["C_data_product_"]
+else:
+    inp_pdic = inp_dic
+src_name = str(inp_pdic["src_name"])
+z_start = float(inp_pdic["z_start"])
+Npart = int(inp_pdic["Npart"])
+particle_type = str(inp_pdic["particle_type"])
+Emax = float(inp_pdic["Emax"])
+Emin = float(inp_pdic["Emin"])
+EminSource = float(inp_pdic["EminSource"])
+Gamma = float(inp_pdic["Gamma"])
+EGMF_fG = float(inp_pdic["EGMF_fG"])
+lmaxEGMF_Mpc = float(inp_pdic["lmaxEGMF_Mpc"])
+jet_half_size = float(inp_pdic["jet_half_size"])
+jet_direction = float(inp_pdic["jet_direction"])
+psf = float(inp_pdic["psf"])
+window_size_RA = float(inp_pdic["window_size_RA"])
+window_size_DEC = float(inp_pdic["window_size_DEC"])
+EBL = str(inp_pdic["EBL"])
+
+from astropy import units as u
+from astropy.coordinates import SkyCoord
+from astropy.nddata import StdDevUncertainty
+from astropy.table import Table
+from gammapy.maps import Map, MapAxis
+from oda_api.api import ProgressReporter
+from oda_api.data_products import (
+    LightCurveDataProduct,
+    NumpyDataProduct,
+    ODAAstropyTable,
+    PictureProduct,
+)
+from specutils import Spectrum1D
+from utils import find_redshift
+
+if z_start <= 0:
+    z_start = find_redshift(src_name)
+
+source = SkyCoord.from_name(src_name, frame="icrs", parse=False, cache=True)
+
+pr = ProgressReporter()
+pr.report_progress(
+    stage="Initialization",
+    progress=0,
+    substage="spectra",
+    subprogress=30,
+    message="some message",
+)
+
+import subprocess
+
+import light_curve as lc
+import numpy as np
+from crbeam import CRbeam
+from matplotlib import pyplot as plt
+from spec_plot import plot_spec
+
+EGMF = EGMF_fG * 1e-15
+
+# internal units are eV
+Emax *= 1e12
+Emin *= 1e12
+EminSource *= 1e12
+
+background = {
+    "Franceschini 2017": 12,
+    "Stecker 2016 lower limit": 10,
+    "Stecker 2016 upper limit": 11,
+    "Inoue 2012 Baseline": 3,
+    "Inoue 2012 lower limit": 4,
+    "Inoue 2012 upper limit": 5,
+}[EBL]
+
+prog = CRbeam(
+    z=z_start,
+    nparticles=Npart,
+    primary=particle_type,
+    emax=Emax,
+    emin=Emin,
+    emin_source=EminSource,
+    EGMF=EGMF,
+    lmaxEGMF=lmaxEGMF_Mpc,
+    background=background,
+)
+cmd = prog.command
+print(cmd)
+
+# One way to call CRbeam without the global cache:
+# prog.run(force_overwrite=False)
+# Here we call CRbeam with the global cache:
+# prog.run_cached(overwrite_local_cache=True)
+# To clear the s3 cache, run:
+# prog.remove_cache()
+
+# To report progress, we split the CRbeam run into 10 parts
+n_steps = 10
+# Initialize multistep simulation
+data_exists = not prog.start_multistage_run(
+    overwrite_local_cache=True, n_steps=n_steps
+)
+proceed = not data_exists
+
+if proceed:
+    for step in range(n_steps):
+        pr.report_progress(
+            stage="Running Simulation", progress=100 * step // n_steps
+        )
+        proceed = prog.simulation_step()
+        # todo: report progress using rest API
+    pr.report_progress(stage="Running Simulation", progress=100)
+
+assert not proceed, "must be completed before this cell"
+if not data_exists:
+    prog.end_multistep_run()
+
+def adjust_weights(mc_file, power):
+    # convert weights to mimic the required injection spectrum power
+    header = ""
+    with open(mc_file, "rt") as lines:
+        for line in lines:
+            if len(line) > 0 and line[0] == "#":
+                header += line[1:].strip() + "\n"
+            else:
+                break
+    weight_col = 2
+    E_src_col = 12
+    data = np.loadtxt(mc_file)
+    weight = data[:, weight_col - 1]
+    E_src = data[:, E_src_col - 1]
+    orig_power = (
+        prog.power_law
+    )  # CRBeam is always called with fixed power=1 to optimize cache usage
+    weight *= np.power(E_src / Emax, -(power - orig_power))
+    output_file = f"{mc_file}_p{power}"
+    np.savetxt(output_file, data, header=header.strip(), fmt="%.6g")
+    return output_file
+
+# this code will not execute the program since the files already exist on the server
+# prog.run(force_overwrite=False)
+
+# one can upload the cache explicitly by calling
+# prog.upload_cache()
+
+pr.report_progress(stage="Building Plots and Tables", progress=0)
+
+print(prog.output_path)
+get_ipython().system("ls {prog.output_path}")   # noqa: F821
+
+mc_file = prog.output_path + "/photon"
+
+# preview of the first lines of the data
+get_ipython().system("cat {mc_file} | awk 'NR<=5'")   # noqa: F821
+
+if Emax != EminSource:
+    mc_file = adjust_weights(mc_file, Gamma)
+
+# preview of the first lines of the reweighted data
+get_ipython().system("cat {mc_file} | awk 'NR<=5'")   # noqa: F821
+
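+# build the binned E^2 dN/dE spectrum table; the helper script is expected to
+# write its output next to the input as "<mc_file>.spec" (used below)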
+subprocess.run(
+    [
+        "bash",
+        os.path.join(os.environ.get("BASEDIR", os.getcwd()), "makeSpecE2.sh"),
+        mc_file,
+    ]
+)
+
+# rotating the beam
+
+if EGMF > 0:
+    from mc_rotate import mc_rotate
+
+    mc_rotated_file = mc_rotate(
+        mc_file, jet_half_size, jet_direction, psf_deg=psf
+    )
+else:
+    mc_rotated_file = mc_file
+print(mc_rotated_file)
+
+subprocess.run(
+    [
+        "bash",
+        os.path.join(os.environ.get("BASEDIR", os.getcwd()), "makeSpecE2.sh"),
+        mc_rotated_file,
+    ]
+)
+
+# preview of the first lines of the rotated data
+get_ipython().system("cat {mc_rotated_file} | awk 'NR<=5'")   # noqa: F821
+
+# reading the source distance in Mpc from the data file
+T_Mpc = lc.get_distance_Mpc(mc_file)
+print(T_Mpc)
+
+# building the spectrum figure for total flux
+
+spec_file = mc_file + ".spec"
+spec_fig = plot_spec(
+    spec_file, title="spectrum", ext="png", show=False, logscale=True
+)
+spec_image = PictureProduct.from_file(spec_fig)
+
+# building the spectrum figure for the flux within PSF
+spec_rotated_file = mc_rotated_file + ".spec"
+spec_rotated_fig = plot_spec(
+    spec_rotated_file,
+    title="spectrum",
+    Emin=Emin,
+    Emax=Emax,
+    ext="png",
+    show=False,
+    logscale=True,
+)
+spec_rotated_image = PictureProduct.from_file(spec_rotated_fig)
+
+print(spec_rotated_file)
+
+lc_params = dict(
+    logscale=True,
+    max_t=-99,  # 8760000,
+    n_points=30,
+    psf=psf,
+    min_n_particles=10,
+    cut_0=True,
+)
+
+# building the light curve figure
+if EGMF > 0:
+    light_curve_fig = lc.make_plot(mc_rotated_file, **lc_params)
+    light_curve_image = PictureProduct.from_file(light_curve_fig)
+else:
+    # fall back to the spectrum figure to avoid problems with an absent light curve
+    light_curve_image = PictureProduct.from_file(spec_fig)
+
+l_curve = None
+if EGMF > 0:
+    delay, weights = lc.get_counts(mc_rotated_file, **lc_params)
+    t, f, N = lc.light_curve(delay, weights, **lc_params)
+    l_curve = LightCurveDataProduct.from_arrays(
+        times=t * 3600.0,  # t is in hours
+        fluxes=f,
+        errors=f / np.sqrt(N),
+        time_format="unix",
+    )
+
+def convert_to_ICRS(phi: np.ndarray, theta: np.ndarray, source: SkyCoord):
+    # the prime system has its z-axis pointing at the source and is centered at the observer
+    # it is obtained by rotating the source system by 180 degrees about the x-axis
+    # prime-system coords carry the suffix "prime" (')
+    # ICRS coords carry no suffix
+    # TODO: add param ic_jet_plane_direction: SkyCoord - gives the plane which contains the jet
+    # by definition, in the prime system the jet lies in the y'-z' plane and the z' axis points from the source towards the observer
+    # for simplicity we first assume that the prime frame is oriented as the ICRS frame (use SkyOffsetFrame with rotation=0)
+
+    # coordinates of the unit direction vector in the prime system
+
+    # direction of event arrival in the prime system
+    z_prime = np.cos(
+        theta / 180.0 * np.pi
+    )  # (-1)*(-1) = 1 (rotating the system and taking the opposite vector)
+    r_xy_prime = np.sin(theta / 180.0 * np.pi)
+    x_prime = -r_xy_prime * np.cos(
+        phi / 180.0 * np.pi
+    )  # (1)*(-1) = -1 (rotating the system and taking the opposite vector)
+    y_prime = r_xy_prime * np.sin(
+        phi / 180.0 * np.pi
+    )  # (-1)*(-1) = 1 (rotating the system and taking the opposite vector)
+
+    print("x',y',z' =", x_prime, y_prime, z_prime)
+
+    # angle between z and z' axes
+    delta1 = 90 * u.deg - source.dec
+
+    print("source:", source.cartesian)
+    print("delta1 = ", delta1)
+
+    # rotating about the y-axis
+
+    x1 = x_prime * np.cos(delta1) + z_prime * np.sin(delta1)
+    y1 = y_prime
+    z1 = -x_prime * np.sin(delta1) + z_prime * np.cos(delta1)
+
+    print("step 1: x,y,z =", x1, y1, z1)
+
+    # rotating the vector by RA about the z-axis (frame rotation by -RA)
+    delta2 = source.ra
+    x = x1 * np.cos(delta2) - y1 * np.sin(delta2)
+    y = x1 * np.sin(delta2) + y1 * np.cos(delta2)
+    z = z1
+
+    print("x,y,z =", x, y, z)
+
+    event_coords = SkyCoord(
+        x=x, y=y, z=z, frame="icrs", representation_type="cartesian"
+    ).spherical
+    # print(event_coords.spherical)
+    return event_coords
+
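+# Sanity check (hypothetical usage): an event with Theta = 0 arrives straight
+# from the source direction, so convert_to_ICRS(np.array([0.0]),
+# np.array([0.0]), source) should yield lon/lat equal to source.ra/source.dec.
+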
+def LoadCubeTemplate(
+    mc_file: str,
+    source: SkyCoord,
+    Emax=1e15,
+    Emin=1e9,
+    bins_per_decade=20,
+    binsz=0.02,
+    theta_mult=1,
+    sec_component_mult=1,
+    remove_straight=False,
+    symmetric_split=0,
+):
+
+    mc_data = np.loadtxt(mc_file)
+    E = mc_data[:, 0]
+    print(f"dataset energy: {np.min(E)} <= E/eV <= {np.max(E)}")
+    Theta = mc_data[:, 2] * theta_mult  # theta_mult is used for debug only
+    print(f"dataset Theta: {np.min(Theta)} <= Theta <= {np.max(Theta)}")
+    # idx = np.where((E >= Emin) & (E<=Emax) & (Theta<theta_max))[0]
+    print(f"Filters: {Emin} <= E/eV <= {Emax}")
+    condition = (E >= Emin) & (E <= Emax)
+    if remove_straight:
+        condition &= Theta > 0
+
+    idx = np.where(condition)[0]
+
+    print("filtered dataset length:", len(idx))
+
+    E = E[idx]
+    Theta = Theta[idx]
+    w = mc_data[:, 1][idx]
+    Phi = mc_data[:, 3][idx]
+    # t = mc_data[:,5][idx]
+    # t *= time_scale_hours
+    if sec_component_mult != 1:  # sec_component_mult is used for debug only
+        print("WARNING: Sec component multiplicity:", sec_component_mult)
+        w[Theta > 0] *= sec_component_mult
+
+    if len(idx) > 0:
+        #     print(f'{np.min(t)} <= t/h <= {np.max(t)}')
+        print(f"{np.min(E)} <= E <= {np.max(E)}")
+
+    energy_axis_name = "energy_true"
+
+    energy_axis = MapAxis.from_energy_bounds(
+        Emin * u.eV,
+        Emax * u.eV,
+        nbin=bins_per_decade,
+        name=energy_axis_name,
+        per_decade=True,
+    )
+
+    m_cube = Map.create(
+        binsz=binsz,
+        width=(window_size_RA * u.deg, window_size_DEC * u.deg),
+        frame="icrs",
+        axes=[energy_axis],
+        skydir=SkyCoord(source),
+    )
+
+    print(m_cube.geom)
+
+    if len(idx) > 0:
+        if symmetric_split > 1:
+            Theta = np.repeat(Theta, symmetric_split)
+            E = np.repeat(E, symmetric_split)
+            w = np.repeat(w, symmetric_split) / symmetric_split
+            Phi = np.random.random(len(Theta)) * 360
+
+        sc = convert_to_ICRS(Phi, Theta, source)
+        m_cube.fill_by_coord(
+            {"lat": sc.lat, "lon": sc.lon, energy_axis_name: E * u.eV},
+            weights=w,
+        )
+
+    return m_cube
+
+def LoadTemplate4D(
+    mc_file: str,
+    source: SkyCoord,
+    redshift,
+    Emax=1e15,
+    Emin=1e9,
+    bins_per_decade=20,
+    binsz=0.02,
+    theta_mult=1,
+    max_time_quantile=1.0,
+    n_time_bins=100,
+    symmetric_split=0,
+):
+    from astropy.cosmology import FlatLambdaCDM
+
+    cosmo = FlatLambdaCDM(
+        H0=67.8 * u.km / u.s / u.Mpc, Tcmb0=2.725 * u.K, Om0=(1 - 0.692)
+    )
+    time_scale_hours = float(
+        ((cosmo.age(0) - cosmo.age(redshift)) / u.h).decompose()
+    )
+
+    mc_data = np.loadtxt(mc_file)
+    E = mc_data[:, 0]
+
+    print("dataset length:", len(mc_data))
+    print(f"dataset energy: {np.min(E)} <= E/eV <= {np.max(E)}")
+    Theta = mc_data[:, 2] * theta_mult  # theta_mult is used for debug only
+    print(f"dataset Theta: {np.min(Theta)} <= Theta <= {np.max(Theta)}")
+    # idx = np.where((E >= Emin) & (E<=Emax) & (Theta<theta_max))[0]
+
+    idx = np.where((E >= Emin) & (E <= Emax))[0]
+
+    print(f"Filters: {Emin} <= E/eV <= {Emax}")
+    print("filtered dataset length:", len(idx))
+
+    E = E[idx]
+    Theta = Theta[idx]
+    w = mc_data[:, 1][idx]
+    Phi = mc_data[:, 3][idx]
+    t = mc_data[:, 5][idx]
+    t *= time_scale_hours
+    min_time = np.min(t)
+    if max_time_quantile >= 1.0:
+        max_time = np.max(t)
+    else:
+        max_time = np.quantile(t, max_time_quantile)
+
+    if max_time == min_time:
+        max_time = min_time + 1e-10
+
+    idx = np.where(t <= max_time)[0]
+    print(f"Filters: {Emin} <= E/eV <= {Emax}, t/h <= {max_time}")
+    print("filtered dataset length:", len(idx))
+
+    E = E[idx]
+    Theta = Theta[idx]
+    w = w[idx]
+    Phi = Phi[idx]
+    t = t[idx]
+
+    energy_axis_name = "energy_true"
+
+    energy_axis = MapAxis.from_energy_bounds(
+        Emin * u.eV,
+        Emax * u.eV,
+        nbin=bins_per_decade,
+        name=energy_axis_name,
+        per_decade=True,
+    )
+
+    from astropy.time import Time
+
+    reference_time = Time(0, format="unix")
+
+    delta = max_time - min_time
+    # bin edges kept for the alternative TimeMapAxis constructions commented out below
+    time_edges = (min_time + delta * np.linspace(0, 1, n_time_bins + 1)) * u.h
+    # time_axis = TimeMapAxis.from_time_edges(
+    #     time_min=time_edges[:-1],
+    #     time_max=time_edges[1:],
+    #     interp="lin",
+    #     unit=u.h,
+    #     name='time',
+    #     reference_time=reference_time
+    #  )
+
+    time_axis = MapAxis.from_bounds(
+        min_time, max_time, n_time_bins, name="time", unit=u.h
+    )
+
+    # time_axis = TimeMapAxis(time_edges[:-1], time_edges[1:], reference_time, name='time', interp='lin')
+
+    # time_axis = TimeMapAxis.from_time_bounds(min_time, max_time, n_time_bins, unit=u.h, name='time')
+
+    map4d = Map.create(
+        binsz=binsz,
+        width=(window_size_RA * u.deg, window_size_DEC * u.deg),
+        frame="icrs",
+        axes=[energy_axis, time_axis],
+        skydir=SkyCoord(source),
+    )
+
+    print(map4d.geom)
+
+    if len(idx) > 0:
+        if symmetric_split > 1:
+            Theta = np.repeat(Theta, symmetric_split)
+            E = np.repeat(E, symmetric_split)
+            t = np.repeat(t, symmetric_split)
+            w = np.repeat(w, symmetric_split) / symmetric_split
+            Phi = np.random.random(len(Theta)) * 360
+
+        sc = convert_to_ICRS(Phi, Theta, source)
+        map4d.fill_by_coord(
+            {
+                "lat": sc.lat,
+                "lon": sc.lon,
+                energy_axis_name: E * u.eV,
+                "time": t * u.h,
+            },
+            weights=w,
+        )
+
+    return map4d
+
+bins_per_decade = 20
+
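+# when the jet axis points at the observer (jet_direction == 0) the image is
+# axially symmetric, so each event may be split into 100 copies with random
+# azimuth (see the symmetric_split handling above) to smooth the sky maps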
+symmetric_split = 0 if jet_direction != 0 else 100
+
+map3d = LoadCubeTemplate(
+    mc_rotated_file,
+    source=source,
+    Emin=Emin,
+    Emax=Emax,
+    bins_per_decade=bins_per_decade,
+    symmetric_split=symmetric_split,
+)
+
+map3d.write("map3d.fits", overwrite=True)
+
+map4d = LoadTemplate4D(
+    mc_rotated_file,
+    source=source,
+    redshift=z_start,
+    Emin=Emin,
+    Emax=Emax,
+    bins_per_decade=bins_per_decade,
+    symmetric_split=symmetric_split,
+)
+map4d.write("map4d.fits", overwrite=True)
+
+d = np.genfromtxt(spec_file)
+ee = d[:, 0]
+ff = d[:, 1]
+ff_err = ff / np.sqrt(d[:, 2])
+plt.errorbar(ee, ff, yerr=ff_err, label="total flux")
+d = np.genfromtxt(spec_rotated_file)
+ee1 = d[:, 0]
+ff1 = d[:, 1]
+ff_err1 = ff1 / np.sqrt(d[:, 2])
+plt.errorbar(ee1, ff1, yerr=ff_err1, label="PSF flux")
+plt.xscale("log")
+plt.yscale("log")
+plt.xlabel("Energy, eV")
+plt.ylabel("$E^2dN/dE$, arbitrary units")
+plt.legend(loc="lower right")
+plt.savefig("Spectrum.png", format="png", bbox_inches="tight")
+
+bin_image = PictureProduct.from_file("Spectrum.png")
+
+data = [ee, ff, ff_err]
+names = ("Energy", "Total_flux", "Total_flux_err")
+table = ODAAstropyTable(Table(data, names=names))
+data = [ee1, ff1, ff_err1]
+names = ("Energy", "PSF_flux", "PSF_flux_err")
+table1 = ODAAstropyTable(Table(data, names=names))
+
+flux_unit = u.eV / u.cm / u.cm / u.s / u.sr
+spec = Spectrum1D(
+    spectral_axis=ee * u.eV,
+    flux=ff * flux_unit,
+    uncertainty=StdDevUncertainty(ff_err),
+)
+spec.write("spec.fits", overwrite=True)
+dp = NumpyDataProduct.from_fits_file("spec.fits")
+spec_rotated = Spectrum1D(
+    spectral_axis=ee1 * u.eV,
+    flux=ff1 * flux_unit,
+    uncertainty=StdDevUncertainty(ff_err1),
+)
+spec_rotated.write("spec_rotated.fits", overwrite=True)
+dp_rotated = NumpyDataProduct.from_fits_file("spec_rotated.fits")
+map3d = NumpyDataProduct.from_fits_file("map3d.fits")
+map4d = NumpyDataProduct.from_fits_file("map4d.fits")
+
+pr.report_progress(stage="Building Plots and Tables", progress=100)
+
+spectrum_png = bin_image  # http://odahub.io/ontology#ODAPictureProduct
+light_curve_png = (
+    light_curve_image  # http://odahub.io/ontology#ODAPictureProduct
+)
+total_spectrum_table = table  # http://odahub.io/ontology#ODAAstropyTable
+psf_spectrum_table = table1  # http://odahub.io/ontology#ODAAstropyTable
+lc_result = l_curve  # http://odahub.io/ontology#LightCurve
+spectrum = dp  # http://odahub.io/ontology#Spectrum
+spectrum_rotated = dp_rotated  # http://odahub.io/ontology#Spectrum
+map3d = map3d  # http://odahub.io/ontology#Spectrum
+map4d = map4d  # http://odahub.io/ontology#Spectrum
+
+# spec_png = spec_image # http://odahub.io/ontology#ODAPictureProduct
+# spec_rotated_png = spec_rotated_image # http://odahub.io/ontology#ODAPictureProduct
+
+# output gathering
+_galaxy_meta_data = {}
+_oda_outs = []
+_oda_outs.append(
+    (
+        "out_Generate_figures_spectrum_png",
+        "spectrum_png_galaxy.output",
+        spectrum_png,
+    )
+)
+_oda_outs.append(
+    (
+        "out_Generate_figures_light_curve_png",
+        "light_curve_png_galaxy.output",
+        light_curve_png,
+    )
+)
+_oda_outs.append(
+    (
+        "out_Generate_figures_total_spectrum_table",
+        "total_spectrum_table_galaxy.output",
+        total_spectrum_table,
+    )
+)
+_oda_outs.append(
+    (
+        "out_Generate_figures_psf_spectrum_table",
+        "psf_spectrum_table_galaxy.output",
+        psf_spectrum_table,
+    )
+)
+_oda_outs.append(
+    ("out_Generate_figures_lc_result", "lc_result_galaxy.output", lc_result)
+)
+_oda_outs.append(
+    ("out_Generate_figures_spectrum", "spectrum_galaxy.output", spectrum)
+)
+_oda_outs.append(
+    (
+        "out_Generate_figures_spectrum_rotated",
+        "spectrum_rotated_galaxy.output",
+        spectrum_rotated,
+    )
+)
+_oda_outs.append(("out_Generate_figures_map3d", "map3d_galaxy.output", map3d))
+_oda_outs.append(("out_Generate_figures_map4d", "map4d_galaxy.output", map4d))
+
+for _outn, _outfn, _outv in _oda_outs:
+    _galaxy_outfile_name = os.path.join(_galaxy_wd, _outfn)
+    if isinstance(_outv, str) and os.path.isfile(_outv):
+        shutil.move(_outv, _galaxy_outfile_name)
+        _galaxy_meta_data[_outn] = {"ext": "_sniff_"}
+    elif getattr(_outv, "write_fits_file", None):
+        _outv.write_fits_file(_galaxy_outfile_name)
+        _galaxy_meta_data[_outn] = {"ext": "fits"}
+    elif getattr(_outv, "write_file", None):
+        _outv.write_file(_galaxy_outfile_name)
+        _galaxy_meta_data[_outn] = {"ext": "_sniff_"}
+    else:
+        with open(_galaxy_outfile_name, "w") as fd:
+            json.dump(_outv, fd, cls=CustomJSONEncoder)
+        _galaxy_meta_data[_outn] = {"ext": "json"}
+
+with open(os.path.join(_galaxy_wd, "galaxy.json"), "w") as fd:
+    json.dump(_galaxy_meta_data, fd)
+print("*** Job finished successfully ***")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/cache.py	Fri Apr 25 19:33:20 2025 +0000
@@ -0,0 +1,112 @@
+import os
+import json
+
+from minio import Minio
+
+# from minio.error import S3Error
+
+
+class CRbeamCache(object):
+    credentials_env_var = "S3_CREDENTIALS"
+    bucket_name = "crbeam"
+    chunk_size = 32 * 1024
+
+    # default credentials
+    endpoint = "play.min.io"
+    access_key = "Q3AM3UQ867SPQQA43P2F"
+    secret_key = "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG"
+
+    def __init__(self) -> None:
+        credentials = os.getenv(self.credentials_env_var)
+        if credentials is not None:
+            credentials = json.loads(credentials)
+            self.endpoint = credentials["endpoint"]
+            self.access_key = credentials["access_key"]
+            self.secret_key = credentials["secret_key"]
+
+        self.client = Minio(
+            self.endpoint,
+            access_key=self.access_key,
+            secret_key=self.secret_key,
+        )
+
+        # Make bucket if not exist.
+        if not self.client.bucket_exists(self.bucket_name):
+            self.client.make_bucket(self.bucket_name)
+
+    def save(self, obj_name, file_path, append_mode=True, **params):
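+        # in append mode, suffix the object name with a running index so that
+        # repeated uploads under the same prefix do not overwrite each other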
+        if append_mode:
+            idx = 0
+            for _ in self.client.list_objects(
+                self.bucket_name, prefix=obj_name
+            ):
+                idx += 1
+            obj_path = obj_name + f"-{idx}"
+        else:
+            obj_path = obj_name
+        self.client.fput_object(
+            self.bucket_name, obj_path, file_path, metadata=params
+        )
+        return obj_path
+
+    def get_cache_size(self, prefix):
+        size = 0
+        for obj in self.client.list_objects(self.bucket_name, prefix=prefix):
+            size += int(
+                obj.object_name[len(prefix):].split("-")[0]
+            )  # todo: load number of particles from metadata
+        return size
+
+    def load_file(self, output_path, obj_name):
+        try:
+            response = self.client.get_object(self.bucket_name, obj_name)
+        except Exception:
+            raise ValueError("object not found")
+        try:
+            # Read data from response.
+            with open(output_path, "wb") as file:
+                for d in response.stream(self.chunk_size):
+                    file.write(d)
+        finally:
+            response.close()
+            response.release_conn()
+
+    def load_results(self, output_path, prefix, skip_paths=()):
+        for obj in self.client.list_objects(self.bucket_name, prefix=prefix):
+            print("found", obj.object_name)
+            if obj.object_name not in skip_paths:
+                # Get data of an object.
+                response = self.client.get_object(
+                    self.bucket_name, obj.object_name
+                )
+                try:
+                    # Append the object data to a local file
+                    with open(output_path, "ab") as file:
+                        # todo: cut first line
+                        for d in response.stream(self.chunk_size):
+                            file.write(d)
+                    # Read data from response.
+                finally:
+                    response.close()
+                    response.release_conn()
+
+    def delete_results(self, prefix):
+        for obj in self.client.list_objects(self.bucket_name, prefix=prefix):
+            self.client.remove_object(self.bucket_name, obj.object_name)
+
+
+if __name__ == "__main__":
+    # test code
+    obj_name_prefix = "photon_z0.1_E_1e+07_1e+13_B1e-15_L0.05-5_N"
+    n_particles = 10000
+    root_path = "/Users/ok/git/mcray/bin/"
+    particle = "photon"
+    obj_name = obj_name_prefix + str(n_particles)
+    file_dir = root_path + obj_name + "/z0"
+    file_path = file_dir + "/" + particle
+    c = CRbeamCache()
+    c.save(obj_name, file_path, n_particles=n_particles)
+    print(file_path + " sent to s3")
+    loaded_file_path = file_path + ".loaded"
+    c.load_results(loaded_file_path, obj_name)
+    print(loaded_file_path + " loaded from s3")
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/crbeam.py	Fri Apr 25 19:33:20 2025 +0000
@@ -0,0 +1,256 @@
+import subprocess
+import sys
+import tempfile
+from os import makedirs, path
+from os.path import join
+
+import numpy as np
+
+
+class objdict(dict):
+    def __getattr__(self, name):
+        if name in self:
+            return self[name]
+        else:
+            raise AttributeError("No such attribute: " + name)
+
+    def __setattr__(self, name, value):
+        self[name] = value
+
+    def __delattr__(self, name):
+        if name in self:
+            del self[name]
+        else:
+            raise AttributeError("No such attribute: " + name)
+
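+# Example (illustrative): objdict gives attribute-style access to dict keys,
+# e.g. p = objdict(emin=1e7); p.emax = 1e13; assert p.emax == p["emax"]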
+
+class CRbeam(object):
+    default_parameters = dict(
+        nparticles=10000,
+        z=0.1,
+        emax=1e13,
+        emin=1e7,
+        emin_source=None,  # if None, emin is used
+        primary="photon",
+        EGMF=0.0,
+        lmaxEGMF=5,
+        lminEGMF=0.05,
+        background=12,
+    )
+
+    particles = dict(
+        electron=0, positron=1, photon=2, gamma=2, neutron=9, proton=10
+    )
+
+    prog_name = "crbeam"
+
+    # Here we always use fixed power law E^{-1} for MC and then adjust weights for the events if needed
+    power_law = 1
+
+    def __init__(self, **kwargs):
+        self._step = 0
+        self._cache = None
+        self._size_per_step = 100
+        p = objdict(self.default_parameters)
+        p.update(kwargs)
+        if p.emin_source is None:
+            p.emin_source = p.emin
+
+        self._cache_name_prefix = (
+            f"{p.primary}_z{p.z}_E_{p.emin:g}_{p.emax:g}_{p.background}"
+        )
+        if p.emin_source < p.emax:
+            self._cache_name_prefix += (
+                f"_pow{self.power_law}_Emin{p.emin_source:g}"
+            )
+        if p.EGMF > 0:
+            self._cache_name_prefix += (
+                f"_B{p.EGMF:g}_turbL{p.lminEGMF}-{p.lmaxEGMF}"
+            )
+
+        self._cache_name_prefix += "_N"
+
+        self._output_dir = self._cache_name_prefix + f"{p.nparticles}"
+
+        p.primary = self.particles[p.primary]
+
+        self.p = p
+
+    @property
+    def output_dir(self):
+        return self._output_dir
+
+    @property
+    def cache(self):
+        from cache import CRbeamCache
+
+        if self._cache is None:
+            self._cache = CRbeamCache()
+        return self._cache
+
+    @property
+    def command(self):
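+        # assembles the crbeam CLI invocation; for the default parameters it
+        # looks roughly like (illustrative):
+        #   crbeam --nparticles 10000 --z 0.1 --emax 1e+13 ... --output <dir> --power 1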
+        result = f"{self.prog_name} "
+        result += " ".join(
+            [f"--{key} {value}" for key, value in self.p.items()]
+        )
+        result += " --output " + self.output_dir
+        power = 1 if self.p.emin_source < self.p.emax else -1000
+        result += f" --power {power}"
+        if self.p.EGMF > 0:
+            # use turbulent magnetic field with unique random orientation for all particles
+            result += " -mft -mfr"
+        return result
+
+    @property
+    def output_path(self):
+        return join(self.output_dir, "z0")
+
+    def run(self, force_overwrite):
+        command_suffix = ""
+        if force_overwrite:
+            command_suffix = " --overwrite"
+        if path.isdir(self.output_path) and not force_overwrite:
+            return
+        logs_dir = "logs"
+        makedirs(logs_dir, exist_ok=True)
+        log_file = f"{logs_dir}/{self.output_dir}.log"
+        with tempfile.NamedTemporaryFile() as script_file:
+            with open(script_file.name, "wt") as out_script:
+                print("#!/bin/bash", file=out_script)
+                print(
+                    self.command + command_suffix,
+                    "1>" + log_file + " 2>&1",
+                    file=out_script,
+                )
+            subprocess.run(["bash", script_file.name])
+        return log_file
+
+    def run_cached(self, overwrite_local_cache=False):
+        import shutil
+
+        output_root = self.output_dir
+        if path.isdir(output_root):
+            if not overwrite_local_cache:
+                return self.output_path
+            shutil.rmtree(output_root)
+
+        size = self.cache.get_cache_size(self._cache_name_prefix)
+        print("s3 data size: ", size)
+        skip_paths = []
+        makedirs(self.output_path, exist_ok=False)
+        if size < self.p.nparticles:
+            try:
+                self.p.nparticles = self.p.nparticles - size
+                self.run(force_overwrite=True)
+                skip_paths.append(self.upload_cache())
+            finally:
+                self.p.nparticles = self.p.nparticles + size
+        else:
+            self.p.nparticles = size
+
+        if size > 0:  # append results from the s3 cache to the local cache file
+            self.cache.load_results(
+                self.output_path + "/photon",
+                self._cache_name_prefix,
+                skip_paths=skip_paths,
+            )
+
+        return self.output_path
+
+    def start_multistage_run(self, overwrite_local_cache=False, n_steps=10):
+        """
+        Initialize environment for running different parts of simulation in subsiquent calls of simulation_step
+        This function is useful if we show progress by running code in different jupyter cells
+        :param overwrite_local_cache: remove local cache
+        :param n_steps: number intermediate steps
+        :return: True if further simulation is needed, False if data is already available
+        """
+        import shutil
+
+        output_root = self.output_dir
+        if path.isdir(output_root):
+            if not overwrite_local_cache:
+                return False
+            shutil.rmtree(output_root)
+
+        size = self.cache.get_cache_size(self._cache_name_prefix)
+        print("s3 data size: ", size)
+        if size >= self.p.nparticles:
+            makedirs(self.output_path, exist_ok=False)
+            self.cache.load_results(
+                self.output_path + "/photon", self._cache_name_prefix
+            )
+            self.p.nparticles = size
+            return False
+        self.p.nparticles = self.p.nparticles - size
+        self._size_per_step = max(
+            100, int(np.ceil((self.p.nparticles) / n_steps))
+        )
+        self._step = 0
+        return True
+
+    def simulation_step(self):
+        size = self._size_per_step * self._step
+        target_nparticles = self.p.nparticles
+        if size >= target_nparticles:
+            return False
+        save_output_dir = self._output_dir
+        try:
+            adjusted_size_per_step = min(
+                self._size_per_step, target_nparticles - size
+            )
+            self.p.nparticles = adjusted_size_per_step
+            self._step += 1
+            self._output_dir = f"{self._output_dir}_step{self._step}"
+            self.run(force_overwrite=True)
+        finally:
+            self.p.nparticles = target_nparticles
+            self._output_dir = save_output_dir
+
+        return size + adjusted_size_per_step < target_nparticles
+
+    def end_multistep_run(self):
+        makedirs(self.output_path, exist_ok=False)
+        with tempfile.NamedTemporaryFile() as script_file:
+            with open(script_file.name, "wt") as task:
+                print(
+                    f"cat {self.output_dir}_step*/z0/photon >"
+                    f" {self.output_path}/photon",
+                    file=task,
+                )
+            subprocess.run(["bash", script_file.name])
+        try:
+            skip_paths = [self.upload_cache()]
+            self.cache.load_results(
+                self.output_path + "/photon",
+                self._cache_name_prefix,
+                skip_paths=skip_paths,
+            )
+            self.p.nparticles = self.cache.get_cache_size(
+                self._cache_name_prefix
+            )
+        except Exception as ex:
+            print(ex, file=sys.stderr)
+        return self.output_path
+
+    def upload_cache(self):
+        source_file = self.output_path + "/photon"
+        if not path.isfile(source_file):
+            if path.isdir(self.output_path):
+                print("empty result")
+                return None
+            else:
+                assert False, "result dir not found"
+        obj_name = self._cache_name_prefix + f"{self.p.nparticles}"
+        print("saving", source_file, "to s3 as", obj_name)
+        return self.cache.save(obj_name, source_file)
+
+    def remove_cache(self):
+        from cache import CRbeamCache
+
+        c = CRbeamCache()
+        prefix = (
+            self._cache_name_prefix
+        )  # todo: use metadata when metadata reading is implemented
+        c.delete_results(prefix)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/crbeam_astro_tool.xml	Fri Apr 25 19:33:20 2025 +0000
@@ -0,0 +1,260 @@
+<tool id="crbeam_astro_tool" name="CRbeam" version="0.0.2+galaxy0" profile="24.0">
+  <requirements>
+    <requirement type="package" version="1.1.1">crbeam</requirement>
+    <requirement type="package" version="19.0.1">pyarrow</requirement>
+    <requirement type="package" version="6.0">unzip</requirement>
+    <requirement type="package" version="1.2.37">oda-api</requirement>
+    <requirement type="package" version="3.9.4">matplotlib</requirement>
+    <requirement type="package" version="0.13.2">seaborn</requirement>
+    <requirement type="package" version="7.2.15">minio</requirement>
+    <requirement type="package" version="1.19.0">specutils</requirement>
+    <requirement type="package" version="1.11.4">scipy</requirement>
+    <requirement type="package" version="1.3">gammapy</requirement>
+    <requirement type="package" version="0.4.10">astroquery</requirement>
+    <requirement type="package" version="5.3">astropy</requirement>
+    <requirement type="package" version="9.1.0">ipython</requirement>
+  </requirements>
+  <command detect_errors="exit_code">ipython '$__tool_directory__/${C_data_product_.DPselector_}.py'</command>
+  <environment_variables>
+    <environment_variable name="BASEDIR">$__tool_directory__</environment_variable>
+    <environment_variable name="GALAXY_TOOL_DIR">$__tool_directory__</environment_variable>
+  </environment_variables>
+  <configfiles>
+    <inputs name="inputs" filename="inputs.json" data_style="paths" />
+  </configfiles>
+  <inputs>
+    <conditional name="C_data_product_">
+      <param name="DPselector_" type="select" label="Data Product">
+        <option value="Generate_events" selected="true">Generate_events</option>
+        <option value="Generate_figures" selected="false">Generate_figures</option>
+      </param>
+      <when value="Generate_events">
+        <param name="src_name" type="text" value="1ES 1215+303" label="src_name" optional="false" />
+        <param name="z_start" type="float" value="0.13" label="z_start" optional="false" />
+        <param name="Npart" type="integer" value="10000" label="Npart" min="1" max="100000" optional="false" />
+        <param name="particle_type" type="select" label="particle_type" optional="false">
+          <option value="electron">electron</option>
+          <option value="gamma" selected="true">gamma</option>
+          <option value="proton">proton</option>
+        </param>
+        <param name="Emax" type="float" value="50" label="Emax (unit: TeV)" optional="false" />
+        <param name="Emin" type="float" value="0.01" label="Emin (unit: TeV)" optional="false" />
+        <param name="EminSource" type="float" value="0.01" label="EminSource (unit: TeV)" optional="false" />
+        <param name="Gamma" type="float" value="2.0" label="Gamma" optional="false" />
+        <param name="EGMF_fG" type="float" value="100" label="EGMF_fG" optional="false" />
+        <param name="lmaxEGMF_Mpc" type="float" value="5" label="lmaxEGMF_Mpc" optional="false" />
+        <param name="jet_half_size" type="float" value="180.0" label="jet_half_size (unit: deg)" optional="false" />
+        <param name="jet_direction" type="float" value="0.0" label="jet_direction (unit: deg)" optional="false" />
+        <param name="psf" type="float" value="180.0" label="psf (unit: deg)" optional="false" />
+        <param name="window_size_RA" type="float" value="4.0" label="window_size_RA (unit: deg)" optional="false" />
+        <param name="window_size_DEC" type="float" value="4.0" label="window_size_DEC (unit: deg)" optional="false" />
+        <param name="EBL" type="select" label="EBL" optional="false">
+          <option value="Franceschini 2017" selected="true">Franceschini 2017</option>
+          <option value="Inoue 2012 Baseline">Inoue 2012 Baseline</option>
+          <option value="Inoue 2012 lower limit">Inoue 2012 lower limit</option>
+          <option value="Inoue 2012 upper limit">Inoue 2012 upper limit</option>
+          <option value="Stecker 2016 lower limit">Stecker 2016 lower limit</option>
+          <option value="Stecker 2016 upper limit">Stecker 2016 upper limit</option>
+        </param>
+      </when>
+      <when value="Generate_figures">
+        <param name="src_name" type="text" value="NGC 1365" label="src_name" optional="false" />
+        <param name="z_start" type="float" value="0" label="z_start" optional="false" />
+        <param name="Npart" type="integer" value="2000" label="Npart" min="1" max="100000" optional="false" />
+        <param name="particle_type" type="select" label="particle_type" optional="false">
+          <option value="electron">electron</option>
+          <option value="gamma" selected="true">gamma</option>
+          <option value="proton">proton</option>
+        </param>
+        <param name="Emax" type="float" value="30" label="Emax (unit: TeV)" optional="false" />
+        <param name="Emin" type="float" value="0.01" label="Emin (unit: TeV)" optional="false" />
+        <param name="EminSource" type="float" value="1.0" label="EminSource (unit: TeV)" optional="false" />
+        <param name="Gamma" type="float" value="2.0" label="Gamma" optional="false" />
+        <param name="EGMF_fG" type="float" value="10" label="EGMF_fG" optional="false" />
+        <param name="lmaxEGMF_Mpc" type="float" value="5" label="lmaxEGMF_Mpc" optional="false" />
+        <param name="jet_half_size" type="float" value="5.0" label="jet_half_size (unit: deg)" optional="false" />
+        <param name="jet_direction" type="float" value="0.0" label="jet_direction (unit: deg)" optional="false" />
+        <param name="psf" type="float" value="1.0" label="psf (unit: deg)" optional="false" />
+        <param name="window_size_RA" type="float" value="2.0" label="window_size_RA (unit: deg)" optional="false" />
+        <param name="window_size_DEC" type="float" value="1.0" label="window_size_DEC (unit: deg)" optional="false" />
+        <param name="EBL" type="select" label="EBL" optional="false">
+          <option value="Franceschini 2017" selected="true">Franceschini 2017</option>
+          <option value="Inoue 2012 Baseline">Inoue 2012 Baseline</option>
+          <option value="Inoue 2012 lower limit">Inoue 2012 lower limit</option>
+          <option value="Inoue 2012 upper limit">Inoue 2012 upper limit</option>
+          <option value="Stecker 2016 lower limit">Stecker 2016 lower limit</option>
+          <option value="Stecker 2016 upper limit">Stecker 2016 upper limit</option>
+        </param>
+      </when>
+    </conditional>
+  </inputs>
+  <outputs>
+    <data label="${tool.name} -&gt; Generate_events Event_file" name="out_Generate_events_Event_file" format="auto" from_work_dir="Event_file_galaxy.output">
+      <filter>C_data_product_['DPselector_'] == 'Generate_events'</filter>
+    </data>
+    <data label="${tool.name} -&gt; Generate_figures spectrum_png" name="out_Generate_figures_spectrum_png" format="auto" from_work_dir="spectrum_png_galaxy.output">
+      <filter>C_data_product_['DPselector_'] == 'Generate_figures'</filter>
+    </data>
+    <data label="${tool.name} -&gt; Generate_figures light_curve_png" name="out_Generate_figures_light_curve_png" format="auto" from_work_dir="light_curve_png_galaxy.output">
+      <filter>C_data_product_['DPselector_'] == 'Generate_figures'</filter>
+    </data>
+    <data label="${tool.name} -&gt; Generate_figures total_spectrum_table" name="out_Generate_figures_total_spectrum_table" format="auto" from_work_dir="total_spectrum_table_galaxy.output">
+      <filter>C_data_product_['DPselector_'] == 'Generate_figures'</filter>
+    </data>
+    <data label="${tool.name} -&gt; Generate_figures psf_spectrum_table" name="out_Generate_figures_psf_spectrum_table" format="auto" from_work_dir="psf_spectrum_table_galaxy.output">
+      <filter>C_data_product_['DPselector_'] == 'Generate_figures'</filter>
+    </data>
+    <data label="${tool.name} -&gt; Generate_figures lc_result" name="out_Generate_figures_lc_result" format="auto" from_work_dir="lc_result_galaxy.output">
+      <filter>C_data_product_['DPselector_'] == 'Generate_figures'</filter>
+    </data>
+    <data label="${tool.name} -&gt; Generate_figures spectrum" name="out_Generate_figures_spectrum" format="auto" from_work_dir="spectrum_galaxy.output">
+      <filter>C_data_product_['DPselector_'] == 'Generate_figures'</filter>
+    </data>
+    <data label="${tool.name} -&gt; Generate_figures spectrum_rotated" name="out_Generate_figures_spectrum_rotated" format="auto" from_work_dir="spectrum_rotated_galaxy.output">
+      <filter>C_data_product_['DPselector_'] == 'Generate_figures'</filter>
+    </data>
+    <data label="${tool.name} -&gt; Generate_figures map3d" name="out_Generate_figures_map3d" format="auto" from_work_dir="map3d_galaxy.output">
+      <filter>C_data_product_['DPselector_'] == 'Generate_figures'</filter>
+    </data>
+    <data label="${tool.name} -&gt; Generate_figures map4d" name="out_Generate_figures_map4d" format="auto" from_work_dir="map4d_galaxy.output">
+      <filter>C_data_product_['DPselector_'] == 'Generate_figures'</filter>
+    </data>
+  </outputs>
+  <tests>
+    <test expect_num_outputs="1">
+      <conditional name="C_data_product_">
+        <param name="DPselector_" value="Generate_events" />
+        <param name="src_name" value="1ES 1215+303" />
+        <param name="z_start" value="0.13" />
+        <param name="Npart" value="10000" />
+        <param name="particle_type" value="gamma" />
+        <param name="Emax" value="50" />
+        <param name="Emin" value="0.01" />
+        <param name="EminSource" value="0.01" />
+        <param name="Gamma" value="2.0" />
+        <param name="EGMF_fG" value="100" />
+        <param name="lmaxEGMF_Mpc" value="5" />
+        <param name="jet_half_size" value="180.0" />
+        <param name="jet_direction" value="0.0" />
+        <param name="psf" value="180.0" />
+        <param name="window_size_RA" value="4.0" />
+        <param name="window_size_DEC" value="4.0" />
+        <param name="EBL" value="Franceschini 2017" />
+      </conditional>
+      <assert_stdout>
+        <has_text text="*** Job finished successfully ***" />
+      </assert_stdout>
+    </test>
+    <test expect_num_outputs="9">
+      <conditional name="C_data_product_">
+        <param name="DPselector_" value="Generate_figures" />
+        <param name="src_name" value="NGC 1365" />
+        <param name="z_start" value="0" />
+        <param name="Npart" value="2000" />
+        <param name="particle_type" value="gamma" />
+        <param name="Emax" value="30" />
+        <param name="Emin" value="0.01" />
+        <param name="EminSource" value="1.0" />
+        <param name="Gamma" value="2.0" />
+        <param name="EGMF_fG" value="10" />
+        <param name="lmaxEGMF_Mpc" value="5" />
+        <param name="jet_half_size" value="5.0" />
+        <param name="jet_direction" value="0.0" />
+        <param name="psf" value="1.0" />
+        <param name="window_size_RA" value="2.0" />
+        <param name="window_size_DEC" value="1.0" />
+        <param name="EBL" value="Franceschini 2017" />
+      </conditional>
+      <assert_stdout>
+        <has_text text="*** Job finished successfully ***" />
+      </assert_stdout>
+    </test>
+  </tests>
+  <help>CRbeam
+======
+
+This service provides simulation [1] of electron-photon cascade
+propagation in intergalactic space. The simulation accounts for the
+interactions of photons and electrons with the extragalactic background
+light (EBL) and magnetic field. In the first and main step of the
+simulation, the code traces the trajectory of every particle and
+records its position and momentum at *z=0*, assuming that the particles
+were initially emitted at a fixed redshift *z_start* with a given
+power-law spectrum in the direction of the *z*-axis. The source jet
+geometry is then taken into account in a second step by rotating the
+source image obtained during the first stage within the jet opening
+angle. This allows the simulation results to be reused for different
+jet geometries. The tool has two available data products:
+
+- list of events
+- list of figures
+
+Both modes share a common list of input parameters:
+
+*src_name* e.g.&#160;&#8216;GRB221009A&#8217; - source name resolvable by the Simbad
+database; used to look up the redshift when *z_start* is 0 (and to set
+the sky position of the map products)
+
+*z_start* e.g.&#160;0.1 - source redshift, or 0 to infer it from *src_name*
+
+*Npart* e.g.&#160;100000 - number of particles to simulate
+
+*particle_type* - initial particle type, one of &#8220;gamma&#8221;,&#8220;electron&#8221; or
+&#8220;proton&#8221;
+
+*Emax* - maximal energy of particles emitted by the source in TeV
+
+*Emin* - minimal energy in TeV of secondary particles to trace in the
+simulation
+
+*EminSource* - minimal energy of particles emitted by the source in TeV.
+If *EminSource* = *Emax*, fixed-energy injection is assumed
+
+*Gamma* - injection spectrum power-law index: dN/dE ~ E^{-Gamma}
+
+*EGMF_fG* - amplitude of the intergalactic magnetic field in femtogauss.
+The Kolmogorov turbulence model is assumed for the magnetic field
+
+*lmaxEGMF_Mpc* - maximal scale of the magnetic field turbulence in Mpc
+
+*jet_half_size* - jet half size in degrees
+
+*jet_direction* - angle in degrees between the jet axis and the
+direction to the observer
+
+*psf* - point spread function of the observing instrument in degrees
+
+*window_size_RA*, *window_size_DEC* - RA and DEC extent in degrees of the
+sky-map products
+
+*EBL* - extragalactic background light model to use. Below is the list
+of models supported:
+
+- *Franceschini 2017*, see Ref. [2]
+- *Stecker 2016 lower limit* : lower limit model from Ref. [3]
+- *Stecker 2016 upper limit* : upper limit model from Ref. [3]
+- *Inoue 2012 Baseline* : baseline model from Ref. [4]
+- *Inoue 2012 lower limit* : lower limit model from Ref. [4]
+- *Inoue 2012 upper limit* : upper limit model from Ref. [4]
+- *zero* : CMB only
+
+References
+----------
+
+[1] O. Kalashev, A. Korochkin, A. Neronov, D. Semikoz,
+*Astron.Astrophys.* 675 (2023) A132
+
+[2] Alberto Franceschini, Giulia Rodighiero, *Astron.Astrophys.* 603
+(2017) A34
+
+[3] Floyd W. Stecker, Sean T. Scully, Matthew A. Malkan, *Astrophys.J.*
+827 (2016)
+
+[4] Yoshiyuki Inoue et al., *Astrophys.J.* 768 (2013) 197
+</help>
+  <citations>
+    <citation type="bibtex">@misc{label,
+			title = {Tool CRbeam},
+			url = {https://renkulab.io/projects/astronomy/mmoda/crbeam},
+			author = {Oleg Kalashev and Andrii Neronov},
+			year = {2024},
+			note = {}
+		}</citation>
+  </citations>
+</tool>
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/light_curve.py	Fri Apr 25 19:33:20 2025 +0000
@@ -0,0 +1,292 @@
+import os
+import sys
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+Mpc_in_h = 2.8590868063e10  # light-travel time over 1 Mpc, expressed in hours
+
+
+def weighted_quantile(
+    values, quantiles, sample_weight=None, values_sorted=False, old_style=False
+):
+    """Very close to numpy.percentile, but supports weights.
+    NOTE: quantiles should be in [0, 1]!
+    :param values: numpy.array with data
+    :param quantiles: array-like with many quantiles needed
+    :param sample_weight: array-like of the same length as `array`
+    :param values_sorted: bool, if True, then will avoid sorting of initial array
+    :param old_style: if True, will correct output to be consistent with numpy.percentile.
+    :return: numpy.array with computed quantiles.
+    """
+    values = np.array(values)
+    quantiles = np.array(quantiles)
+    if sample_weight is None:
+        sample_weight = np.ones(len(values))
+    sample_weight = np.array(sample_weight)
+    assert np.all(quantiles >= 0) and np.all(
+        quantiles <= 1
+    ), "quantiles should be in [0, 1]"
+
+    if not values_sorted:
+        sorter = np.argsort(values)
+        values = values[sorter]
+        sample_weight = sample_weight[sorter]
+
+    weighted_quantiles = np.cumsum(sample_weight) - 0.5 * sample_weight
+    if old_style:
+        # To be consistent with numpy.percentile
+        weighted_quantiles -= weighted_quantiles[0]
+        weighted_quantiles /= weighted_quantiles[-1]
+    else:
+        weighted_quantiles /= np.sum(sample_weight)
+    return np.interp(quantiles, weighted_quantiles, values)
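+
+# Example (a sketch): with values [1, 2, 3] and weights [1, 1, 2],
+# weighted_quantile([1, 2, 3], [0.5], sample_weight=[1, 1, 2]) returns
+# the weighted median, approximately 2.33.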
+
+
+def get_distance_Mpc(mc_file):
+    # read the source distance in Mpc from the data file header,
+    # which is assumed to contain a token sequence "T/Mpc = <value>"
+    with open(mc_file) as f_mc_lines:
+        for line in f_mc_lines:
+            if line.startswith("#"):
+                cols = list(line.split())
+                # list.index would raise ValueError on comment lines
+                # without the token, so test membership first
+                if "T/Mpc" in cols:
+                    idx = cols.index("T/Mpc")
+                    return float(cols[idx + 2])
+    raise ValueError('Unexpected mc file format: "T/Mpc" not found')
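+
+# Example (an assumed header layout): a comment line such as
+# "# ... T/Mpc = 50 ..." makes get_distance_Mpc return 50.0.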
+
+
+default_params = {
+    "weight_col": 2,
+    "E_src_col": 8,
+    "delay_col": 6,
+    "comoving_distance": None,
+    "n_points": 100,
+    "logscale": False,
+    "suffix": "",
+    "Emin": 1e6,
+    "Emax": 1e20,
+    "psf": 1.0,
+    "cut_0": False,
+    "rounding_error": 0.0007,  # 1./600,
+    "min_n_particles": 100,
+    "out_ext": "png",
+    "max_t": 0.05,  # use negative for percentile
+    "show": False,
+    "add_alpha": 0.0,  # multiply weights be E_src^{-add_alpha}
+    # 'frac_time': 2000/3600,  # calculate fraction of flux coming within this time in hours
+    "verbose": 2,
+    "format": ".2g",  # float format
+}
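+
+# Note: weight_col and delay_col above are 1-based column numbers;
+# the code subtracts 1 before indexing the data array.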
+
+
+def filter_data(data, **kwargs):
+    params = default_params.copy()
+    params.update(kwargs)
+
+    # column 0 holds the particle energy and column 2 the arrival angle
+    # Theta in degrees (see the header written by mc_rotate)
+    Emax = params["Emax"]
+    Emin = params["Emin"]
+    data = data[data[:, 0] <= Emax]
+    data = data[data[:, 0] >= Emin]
+    psf = params["psf"]
+    data = data[data[:, 2] <= psf]
+    cut0 = params["cut_0"]
+    if cut0:
+        # optionally drop events with exactly zero time delay
+        col = params["delay_col"] - 1
+        data = data[data[:, col] != 0.0]
+    return data
+
+
+def get_counts(rotated_mc_file, **kwargs):
+    params = default_params.copy()
+    params.update(kwargs)
+    data = np.loadtxt(rotated_mc_file)
+    data_filtered = filter_data(data, **kwargs)
+    verbose = params["verbose"]
+    if verbose > 1:
+        print(len(data_filtered), "of", len(data), "has passed the filter")
+
+    weight_col = params["weight_col"] - 1
+    col = params["delay_col"] - 1
+    comoving_distance = params["comoving_distance"]
+    if not comoving_distance:
+        comoving_distance = get_distance_Mpc(rotated_mc_file)
+
+    x_scale = comoving_distance * Mpc_in_h  # convert to hours
+
+    delay = data_filtered[:, col] * x_scale
+
+    idxs = np.argsort(delay)
+    delay = delay[idxs]
+
+    if weight_col >= 0:
+        assert weight_col < data_filtered.shape[1]
+        weights = data_filtered[idxs, weight_col]
+    else:
+        weights = np.ones(len(idxs))
+
+    add_alpha = params["add_alpha"]
+    if add_alpha != 0:
+        # reweight by E_src^{-add_alpha}; index with the same sort order
+        # (idxs) as the weights to keep the rows aligned
+        E_src = data_filtered[idxs, params["E_src_col"]]
+        av_Esrc = np.exp(np.log(E_src).mean())
+        weights *= np.power(E_src / av_Esrc, -add_alpha)
+
+    return delay, weights
+
+
+def light_curve(delay, weights, **kwargs):
+    params = default_params.copy()
+    params.update(kwargs)
+    min_n_particles = params["min_n_particles"]
+    min_bin_size = params["rounding_error"]
+    max_t = params["max_t"]
+
+    if max_t < 0:
+        max_t = weighted_quantile(
+            delay, [-0.01 * max_t], sample_weight=weights)[0]
+
+    f = []
+    t = []
+    N = []
+
+    bin_idx = 0
+    if delay[0] < min_bin_size:
+        # merge all delays below the rounding error into a single first
+        # bin with at least min_n_particles events
+        bin_idx = np.where(delay < min_bin_size)[0][-1]
+        if bin_idx + 1 < min_n_particles:
+            bin_idx = min_n_particles
+        wsum = np.sum(weights[:bin_idx])
+        _t = np.sum(delay[:bin_idx] * weights[:bin_idx]) / wsum
+        _t = max(_t, 0.5 * min_bin_size)
+        t.append(_t)
+        bin_size = max(delay[bin_idx] - delay[0], min_bin_size)
+        f.append(wsum / bin_size)
+        N.append(bin_idx)
+
+    while True:
+        xmin = (
+            0.5 * (delay[bin_idx - 1] + delay[bin_idx])
+            if bin_idx > 0
+            else delay[bin_idx]
+        )
+        if xmin > max_t:
+            break
+        bin_idx2 = np.where(
+            delay[bin_idx + min_n_particles:] > xmin + min_bin_size)[0]
+        if len(bin_idx2) == 0:
+            break
+        bin_idx2 = bin_idx2[0] + bin_idx + min_n_particles
+        _delay = delay[bin_idx:bin_idx2]
+        _weights = weights[bin_idx:bin_idx2]
+        wsum = _weights.sum()
+        t.append(np.sum(_delay * _weights) / wsum)
+        xmax = 0.5 * (delay[bin_idx2 - 1] + delay[bin_idx2])
+        f.append(wsum / (xmax - xmin))
+        N.append(bin_idx2 - bin_idx)
+        bin_idx = bin_idx2
+
+    return [np.array(x) for x in (t, f, N)]
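+
+# Typical use (a sketch; the file name is illustrative):
+#     delay, weights = get_counts("photons_rotated")
+#     t, f, N = light_curve(delay, weights, max_t=-90)
+# t holds the bin-averaged delays in hours, f the weighted rate dN/dt
+# per bin, and N the number of events per bin.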
+
+
+def make_plot(path, label="verbose", **kwargs):
+    params = default_params.copy()
+    params.update(kwargs)
+    path = os.path.expanduser(path)
+    fmt = params["format"]
+    col = params["delay_col"] - 1
+    logscale = params["logscale"]
+    verbose = params["verbose"]
+    Emax = params["Emax"]
+    Emin = params["Emin"]
+    psf = params["psf"]
+    cut0 = params["cut_0"]
+
+    delay, weights = get_counts(path, **kwargs)
+    t, f, _ = light_curve(delay, weights, **kwargs)
+
+    suffix = params["suffix"]
+
+    if cut0:
+        suffix += "_cut0"
+    if logscale:
+        suffix += "_log"
+
+    max_t = params["max_t"]
+    out_name = (
+        f"{path}_f{col + 1}_E{Emin:{fmt}}-{Emax:{fmt}}TeV_th{psf}_r{max_t}{suffix}."
+    )
+
+    x_label = "t, [h]"
+    y_label = "dN/dt [a.u.]"
+
+    if verbose > 1:
+        for key, val in params.items():
+            print(f"\t{key} = {val}")
+
+    if max_t < 0:
+        median, max_t = weighted_quantile(
+            delay, [0.5, -0.01 * max_t], sample_weight=weights
+        )
+    else:
+        median = weighted_quantile(delay, [0.5], sample_weight=weights)[0]
+
+    # plot the light curve
+    plt.xlabel(x_label)
+    plt.ylabel(y_label)
+
+    if label == "verbose":
+        label = f"50% with delay<{median:{fmt}} h"
+    plot_args = {}
+    if label:
+        plot_args["label"] = label
+
+    plt.plot(t, f, "g-", **plot_args)
+
+    if logscale:
+        plt.xscale("log")
+        plt.yscale("log")
+
+    if label:
+        plt.legend(loc="upper right")
+
+    file_name = out_name + params["out_ext"].strip()
+    plt.savefig(file_name)
+    if verbose > 0:
+        print("saved to", file_name)
+    if params["show"]:
+        plt.show()
+
+    return file_name
+
+
+if __name__ == "__main__":
+
+    def usage_exit(reason=""):
+        print(reason, file=sys.stderr)
+        print(
+            "usage: python",
+            sys.argv[0],
+            "data_file",
+            *["{}={}".format(k, v) for k, v in default_params.items()],
+            file=sys.stderr,
+        )
+        exit(1)
+
+    if len(sys.argv) < 2:
+        usage_exit()
+
+    kwargs = {}
+    for par in sys.argv[2:]:
+        kv = par.split("=")
+        if len(kv) != 2:
+            usage_exit("invalid argument " + par)
+        if kv[0] not in default_params:
+            usage_exit("unknown parameter " + kv[0])
+        constr = type(default_params[kv[0]])
+        value = kv[1]
+        if constr == bool:
+            value = value.lower() == "false" or value == "0"
+            value = not value
+        else:
+            value = constr(value)
+        kwargs[kv[0]] = value
+
+    make_plot(sys.argv[1], **kwargs)
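+
+# Example invocation (a sketch; the data file name is illustrative):
+#     python light_curve.py photons_rotated psf=1.0 max_t=-90 logscale=1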
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/makeSpecE2.sh	Fri Apr 25 19:33:20 2025 +0000
@@ -0,0 +1,19 @@
+#!/bin/bash
+#prints normalized spectrum dN(E)/dE * E^2
+inidir=$PWD
+
+if [ $# -lt 1 ]
+then
+    echo "usage ./makeSpecE2.sh file"
+    exit 1
+fi
+
+if [ -f "$1" ]
+then
+	ofile="$1.spec"
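+	# Bin log10(E) into 0.1-dex bins (first awk), sum the weights from
+	# column 2 within each bin (second awk), then convert each bin sum
+	# to E^2*dN/dE using the bin width E*(10^0.05 - 10^-0.05) (third awk).
+	# Output columns: E (geometric bin centre), E^2*dN/dE, events per bin.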
+	cat "$1" | awk '!/^($|[[:space:]]*#)/{ printf("%.1f %g\n",log($1)/log(10)+0.05,$2)}' | sort -g -k 1 | awk '{ if($1!=PREV&&NR>1) printf("%g %g %d\n",PREV,SUM,SUMN); if($1!=PREV||NR==1) {SUM=0; SUMN=0;}  SUM=SUM+$2; SUMN=SUMN+1; PREV=$1; } END {printf("%g %g %d\n",PREV,SUM,SUMN)}' | awk 'NF==3 { E=10^($1-0.05); printf("%g %g %d\n", E, $2*E/(10^0.05-10^(-0.05)),$3)}' > "$ofile"
+else
+	echo "$1 not found"
+	exit 1
+fi
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/mc_rotate.py	Fri Apr 25 19:33:20 2025 +0000
@@ -0,0 +1,179 @@
+import math
+
+import numpy as np
+
+
+def mc_rotate(
+    mc_file, jet_half_size_deg, jet_direction_deg, psf_deg=180, rotated_file=None
+):
+    if rotated_file is None:
+        rotated_file = f"{mc_file}_{jet_half_size_deg}_{jet_direction_deg}_{psf_deg}"
+
+    jet_half_size = (
+        np.longdouble(jet_half_size_deg) / 180.0 * math.pi
+    )  # jet opening half-angle, in radians
+    jet_direction = (
+        np.longdouble(jet_direction_deg) / 180.0 * math.pi
+    )  # angle between the direction to the source and the jet axis, in radians
+
+    if jet_half_size > math.pi or jet_half_size <= 0:
+        jet_half_size = math.pi
+    # the jet axis is assumed to lie in the y-z plane; if the angle is 0,
+    # the jet points towards the observer
+    jet_axes = np.array(
+        [0, np.sin(jet_direction), np.cos(jet_direction)], dtype=np.longdouble
+    )
+
+    if jet_direction > math.pi or jet_direction < 0:
+        raise ValueError(
+            "invalid jet_direction_deg param: a value in the [0, 180] range is expected")
+
+    with open(mc_file, "r") as lines, open(rotated_file, "w") as rotated:
+        column_names = []
+        for line in lines:
+            columns = line.split()
+            if len(columns) == 0:
+                continue
+            if columns[0] == "#":
+                if "weight" in columns:
+                    column_names = (
+                        columns[1:3]
+                        + ["Theta", "Phi", "dT_raw/T", "dT_calc/T"]
+                        + columns[9:]
+                    )
+                    print("# " + "\t".join(column_names), file=rotated)
+                else:
+                    print(line.strip(), file=rotated)
+            else:
+                particle = np.array(list(map(np.longdouble, columns)))
+                beta = phi = 0  # dTcalc = 0
+
+                r = particle[2:5]
+                # dz is roughly equal to the time delay in units of the
+                # full path (assuming a very small deflection angle)
+                dz = r[2]
+
+                # negative dz may appear in the data as a result of rounding errors
+                dz = max(dz, 0)  # replace negative values by zero
+
+                r[2] = 1.0 - dz  # now r is the position of the particle
+
+                # direction of momentum
+                n = particle[5:8]
+                n[2] = 1.0 - n[2]
+                # compensate rounding errors for n
+                mod_n = np.dot(n, n) ** 0.5
+                n = 1 / mod_n * n
+                t = 0  # calculated delay
+                if dz > 0:  # there is some delay
+                    r2 = np.dot(r, r)
+                    # assert r2 < 1
+                    if r2 > 1:
+                        r *= 1.0 / np.sqrt(r2)
+                        r2 = 1.0
+                    # we continue the trajectory along the momentum direction
+                    # until it reaches the sphere of radius 1.
+                    # to find the delay t, we solve the following quadratic
+                    # equation with respect to the variable t:
+                    # (x + n_x*t)^2 + (y + n_y*t)^2 + (z + n_z*t)^2 = 1
+                    # with the extra condition t > 0
+                    # below is the solution:
+                    rV = np.dot(n, r)
+                    t = np.sqrt(1.0 - r2 + rV * rV) - rV
+
+                    # calculating the end point of the trajectory
+                    r = r + t * n
+
+                # normalizing r to 1 to fix the rounding errors
+                modR = np.dot(r, r) ** 0.5
+                r = 1.0 / modR * r
+
+                # now we construct the coordinate rotation that maps the vector r
+                # to (0, 0, 1), in order to recover the initial direction of the
+                # particle momentum
+                dr2 = r[0] ** 2 + r[1] ** 2
+                if dr2 > 0:
+                    # building rotation matrix to move x,y,z to 0,0,1
+                    norm = 1.0 / (dr2**0.5)
+                    # rotation axes:
+                    ux = norm * r[1]
+                    uy = -norm * r[0]
+                    # uz=0
+                    cos = r[2]
+                    sin = (1 - cos**2) ** 0.5
+
+                    toCenter = np.array(
+                        [
+                            [cos + ux * ux * (1 - cos), ux *
+                             uy * (1 - cos), uy * sin],
+                            [ux * uy * (1 - cos), cos + uy *
+                             uy * (1 - cos), -ux * sin],
+                            [-uy * sin, ux * sin, cos],
+                        ]
+                    )
+
+                    zAxes = np.array([0.0, 0.0, 1.0], dtype=np.longdouble)
+                    # test
+                    error = np.fabs(np.dot(toCenter, r) - zAxes).max()
+                    assert error < 1e-7
+                    # finding starting direction of the momentum
+                    nInitial = np.dot(toCenter, zAxes)
+                    norm_nInitial = np.sqrt(np.dot(nInitial, nInitial))
+                    nInitial *= (
+                        1.0 / norm_nInitial
+                    )  # normalize after rotation to avoid rounding errors
+                    # calculating angle between starting direction and jet axis
+                    startCos = np.dot(nInitial, jet_axes)
+                    startAngle = 0
+                    if (
+                        startCos < 1
+                    ):  # avoid rounding errors if the angle is very close to zero
+                        startAngle = np.arccos(startCos)
+                    # TODO: instead of a step function we could modify the weight
+                    # depending on the angular distance from the jet axis
+                    if (
+                        startAngle < jet_half_size
+                    ):  # if the particle is inside the cone (passes cut)
+                        # calculate angle between arrival direction and direction to source
+                        cosBeta = np.dot(
+                            r, n
+                        )  # this angle is invariant (doesn't depend on turns)
+                        if (
+                            cosBeta < 1
+                        ):  # avoid rounding errors if angle is very close to zero
+                            beta = np.arccos(cosBeta)
+                            nFinal = np.dot(toCenter, n).transpose()
+                            # phi is the second angle (useful in asymmetric
+                            # tasks); if beta=0 it is undefined (can take
+                            # any value)
+                            cosPhi = nFinal[0] / np.sin(beta)
+                            if cosPhi < 1 and cosPhi > -1:
+                                phi = np.arccos(
+                                    cosPhi
+                                )  # returns value in interval [0,pi]
+                                if nFinal[1] < 0:
+                                    phi = 2 * math.pi - phi
+
+                        # s = math.sin(startAngle+beta)
+                        # if s>0.:
+                        #     dTcalc = (math.sin(startAngle)+math.sin(beta))/s - 1.
+                        # dTcalc = (1.-min(modR, 1.))/cosBeta  # condition modR > 1 can only occur due to rounding error
+                    else:
+                        continue
+                elif (
+                    jet_direction > jet_half_size
+                ):  # no deflection: skip the particle if the jet does not point towards the observer
+                    continue
+
+                if beta * 180.0 / math.pi > psf_deg:
+                    continue
+
+                output = [
+                    particle[0],
+                    particle[1],
+                    beta * 180.0 / math.pi,
+                    phi * 180.0 / math.pi,
+                    dz,
+                    t,
+                ] + list(particle[8:])
+                output = ["{0:g}".format(i) for i in output]
+                print(*output, file=rotated)
+
+    return rotated_file
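+
+# Example (a sketch; the file name is illustrative):
+#     mc_rotate("photons.txt", 5.0, 0.0, psf_deg=1.0)
+# writes the rotated event list to "photons.txt_5.0_0.0_1.0" and
+# returns that file name.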
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/spec_plot.py	Fri Apr 25 19:33:20 2025 +0000
@@ -0,0 +1,48 @@
+import matplotlib.pyplot as plt
+import numpy as np
+
+
+def plot_spec(spec_file, title="", Emin=0, Emax=0, ext="pdf", show=True, logscale=True):
+    data = np.loadtxt(spec_file)
+    if Emin > 0:
+        data = data[Emin <= data[:, 0]]
+    else:
+        Emin = 10 ** np.floor(np.log10(np.min(data[:, 0])))
+    if Emax > 0:
+        data = data[data[:, 0] <= Emax]
+    else:
+        Emax = 10 ** np.ceil(np.log10(np.max(data[:, 0])))
+
+    ymin = np.min(data[:, 1] - data[:, 1] / np.sqrt(data[:, 2]))
+    ymax = np.max(data[:, 1] + data[:, 1] / np.sqrt(data[:, 2]))
+
+    if title:
+        plt.title(title)
+    if logscale:
+        plt.xscale("log")
+        plt.yscale("log")
+        if ymin <= 0:
+            ymin = data[:, 1] - data[:, 1] / np.sqrt(data[:, 2])
+            ymin = ymin[ymin > 0]
+            if len(ymin) > 0:
+                ymin = np.min(ymin)
+            else:
+                ymin = 0
+        if ymin > 0:
+            ymin = 10 ** np.floor(np.log10(ymin))
+            ymax = 10 ** np.ceil(np.log10(ymax))
+            plt.ylim(ymin, ymax)
+    else:
+        plt.ylim(ymin - 0.1 * (ymax - ymin), ymax + 0.1 * (ymax - ymin))
+
+    plt.xlim(Emin, Emax)
+
+    # plt.plot(data[:,2])
+    # Poisson error bars: relative error 1/sqrt(N) per bin, where the
+    # third column holds the per-bin event count
+    plt.errorbar(
+        data[:, 0], data[:, 1], yerr=data[:, 1] / np.sqrt(data[:, 2]), fmt="-o"
+    )
+    out_file = spec_file + "." + ext
+    plt.savefig(out_file)
+    if show:
+        plt.show()
+    return out_file
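+
+# Example (a sketch; the file name is illustrative): plot the spectrum
+# produced by makeSpecE2.sh, e.g.
+#     plot_spec("photons.txt.spec", title="CRbeam spectrum", show=False)
+# saves "photons.txt.spec.pdf" and returns that path.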
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/utils.py	Fri Apr 25 19:33:20 2025 +0000
@@ -0,0 +1,16 @@
+import numpy as np
+from astroquery.ipac.ned import Ned
+
+
+def find_redshift(src_name: str):
+    result_table = Ned.query_object(src_name)
+    # Check if there are results
+    if result_table is not None:
+        # Extract the redshift from the result table
+        z = float(result_table["Redshift"].data[0])
+        if np.isnan(z):
+            raise NotImplementedError(
+                f"Failed to find redshift for {src_name}")
+    else:
+        raise ValueError(f"Object named {src_name} not found in NED database")
+    return z
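+
+# Example (a sketch): find_redshift("1ES 1215+303") queries NED and
+# should return roughly 0.13, the default z_start used by the tool.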