"""
Script to generate a tabular file of a metabolic model (built-in or custom).

This script loads a COBRA model (built-in or custom), optionally applies
medium and gene nomenclature settings, derives reaction-related metadata
(GPR rules, formulas, bounds, objective coefficients, medium membership,
and compartments for ENGRO2), and writes a tabular summary.
"""
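# Example command line (a minimal sketch; the script filename and output paths are placeholders):
#   python this_script.py --model ENGRO2 --name ENGRO2 \
#       --medium_selector Default --gene_format Default \
#       --out_log run.log --out_tabular ENGRO2_summary.tsv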

import os
import csv
import cobra
import argparse
import pandas as pd
try:
    from .utils import general_utils as utils
    from .utils import model_utils as modelUtils
except ImportError:
    import utils.general_utils as utils
    import utils.model_utils as modelUtils
from typing import Optional, Tuple, List
import logging
from pathlib import Path


ARGS: argparse.Namespace

def process_args(args: Optional[List[str]] = None) -> argparse.Namespace:
    """
    Parse command-line arguments.
    """

    parser = argparse.ArgumentParser(
        usage="%(prog)s [options]",
        description="Generate custom data from a given model"
    )

    parser.add_argument("--out_log", type=str, required=True,
                        help="Output log file")

    parser.add_argument("--model", type=str,
                        help="Built-in model identifier (e.g., ENGRO2, Recon, HMRcore)")
    parser.add_argument("--input", type=str,
                        help="Custom model file (JSON, XML, MAT, YAML)")
    parser.add_argument("--name", nargs='*', required=True,
                        help="Model name (default or custom)")

    parser.add_argument("--medium_selector", type=str, default="Default",
                        help="Medium selection option")

    parser.add_argument("--gene_format", type=str, default="Default",
                        help="Gene nomenclature format: Default (original), ENSNG, HGNC_SYMBOL, HGNC_ID, ENTREZ")

    parser.add_argument("--out_tabular", type=str,
                        help="Output file for the merged dataset (written as tab-separated text)")

    parser.add_argument("--tool_dir", type=str, default=os.path.dirname(os.path.abspath(__file__)),
                        help="Tool directory (default: auto-detected package location)")

    return parser.parse_args(args)
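# Programmatic/testing use (a sketch; paths are placeholders): arguments may also be passed as a list,
# e.g. process_args(["--out_log", "run.log", "--model", "ENGRO2", "--name", "ENGRO2",
#                    "--out_tabular", "out.tsv"])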

################################- INPUT DATA LOADING -################################
def detect_file_format(file_path: str) -> utils.FileFormat:
    """
    Detect the file format by examining file content, falling back to the extension.
    Handles Galaxy .dat files by looking at content.
    """
    try:
        with open(file_path, 'r') as f:
            first_lines = ''.join([f.readline() for _ in range(5)])

        # Check for XML (SBML)
        if '<?xml' in first_lines or '<sbml' in first_lines:
            return utils.FileFormat.XML

        # Check for JSON
        if first_lines.strip().startswith('{'):
            return utils.FileFormat.JSON

        # Check for YAML (heuristic: early lines ending with ':')
        if any(line.strip().endswith(':') for line in first_lines.split('\n')[:3]):
            return utils.FileFormat.YML

    except Exception:
        # Unreadable or binary content (e.g., MAT files): fall through to extension check
        pass

    # Fall back to extension-based detection
    if file_path.endswith('.xml') or file_path.endswith('.sbml'):
        return utils.FileFormat.XML
    elif file_path.endswith('.json'):
        return utils.FileFormat.JSON
    elif file_path.endswith('.mat'):
        return utils.FileFormat.MAT
    elif file_path.endswith('.yml') or file_path.endswith('.yaml'):
        return utils.FileFormat.YML

    # Default to XML for unknown extensions
    return utils.FileFormat.XML
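# Example (a sketch; the path is illustrative):
#   detect_file_format("uploaded_model.dat")  # -> utils.FileFormat.JSON if the file starts with '{'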

def load_custom_model(file_path: utils.FilePath, ext: Optional[utils.FileFormat] = None) -> cobra.Model:
    """
    Load a custom model from a file in JSON, XML, MAT, or YML format.

    Args:
        file_path : The path to the file containing the custom model.
        ext : Explicit file format. Needed in Galaxy, where uploaded files lose their original extension.

    Raises:
        DataErr : if the file is in an invalid format or cannot be opened for whatever reason.

    Returns:
        cobra.Model : the model, if successfully opened.
    """
    ext = ext if ext else file_path.ext
    try:
        if ext is utils.FileFormat.XML:
            return cobra.io.read_sbml_model(file_path.show())

        if ext is utils.FileFormat.JSON:
            return cobra.io.load_json_model(file_path.show())

        if ext is utils.FileFormat.MAT:
            return cobra.io.load_matlab_model(file_path.show())

        if ext is utils.FileFormat.YML:
            return cobra.io.load_yaml_model(file_path.show())

    except Exception as e:
        raise utils.DataErr(file_path, str(e))

    raise utils.DataErr(
        file_path,
        f"Unrecognized format '{file_path.ext}'. Only JSON, XML, MAT, YML are supported."
    )
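# Example (a sketch; "my_model.json" is an illustrative path):
#   fmt = detect_file_format("my_model.json")
#   model = load_custom_model(utils.FilePath.fromStrPath("my_model.json"), fmt)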


###############################- FILE SAVING -################################
def save_as_csv_filePath(data: dict, file_path: utils.FilePath, fieldNames: Tuple[str, str]) -> None:
    """
    Save dictionary-shaped data as a two-column, tab-separated file at the given FilePath.

    Args:
        data : the data to be written to the file.
        file_path : the path to the output file, as a FilePath.
        fieldNames : the names of the two fields (columns) in the file.

    Returns:
        None
    """
    with open(file_path.show(), 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldNames, dialect="excel-tab")
        writer.writeheader()

        for key, value in data.items():
            writer.writerow({fieldNames[0]: key, fieldNames[1]: value})

def save_as_csv(data: dict, file_path: str, fieldNames: Tuple[str, str]) -> None:
    """
    Save dictionary-shaped data as a two-column, tab-separated file at the given path string.

    Args:
        data : the data to be written to the file.
        file_path : the path to the output file, as a string.
        fieldNames : the names of the two fields (columns) in the file.

    Returns:
        None
    """
    with open(file_path, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldNames, dialect="excel-tab")
        writer.writeheader()

        for key, value in data.items():
            writer.writerow({fieldNames[0]: key, fieldNames[1]: value})
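# Example (a sketch): each key/value pair becomes one tab-separated row under the two given headers:
#   save_as_csv({"R1": 0.5, "R2": 1.0}, "scores.tsv", ("ReactionID", "Score"))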

def save_as_tabular_df(df: pd.DataFrame, path: str) -> None:
    """
    Save a pandas DataFrame as a tab-separated file, creating directories as needed.

    Args:
        df: The DataFrame to write.
        path: Destination file path (will be written as TSV).

    Raises:
        DataErr: If writing the output fails for any reason.

    Returns:
        None
    """
    try:
        os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
        df.to_csv(path, sep="\t", index=False)
    except Exception as e:
        raise utils.DataErr(path, f"failed writing tabular output: {e}")
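# Example (a sketch; missing parent directories are created automatically):
#   save_as_tabular_df(pd.DataFrame({"ReactionID": ["R1"], "GPR": ["geneA or geneB"]}),
#                      "results/model_summary.tsv")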

def is_placeholder(gid) -> bool:
    """Return True if the gene id looks like a placeholder (e.g., 0/NA/NAN/empty)."""
    if gid is None:
        return True
    s = str(gid).strip().lower()
    return s in {"0", "", "na", "nan"}  # lowercase for simple matching

def sample_valid_gene_ids(genes, limit=10) -> List[str]:
    """Return up to `limit` valid gene IDs, skipping placeholders (e.g., the first 0 in RECON)."""
    out = []
    for g in genes:
        gid = getattr(g, "id", getattr(g, "gene_id", g))
        if not is_placeholder(gid):
            out.append(str(gid))
            if len(out) >= limit:
                break
    return out
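# Example (a sketch): given gene ids ["0", "HGNC:5", "HGNC:7"], the placeholder "0" is
# skipped and sample_valid_gene_ids(...) returns ["HGNC:5", "HGNC:7"].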


###############################- ENTRY POINT -################################
def main(args: Optional[List[str]] = None) -> None:
    """
    Initialize and generate custom data based on the frontend input arguments.

    Returns:
        None
    """
    # Parse args from frontend (Galaxy XML)
    global ARGS
    ARGS = process_args(args)

    # Convert name from list to string (handles names with spaces)
    if isinstance(ARGS.name, list):
        ARGS.name = ' '.join(ARGS.name)

    if ARGS.input:
        # Load a custom model from file with auto-detected format
        detected_format = detect_file_format(ARGS.input)
        model = load_custom_model(utils.FilePath.fromStrPath(ARGS.input), detected_format)
    else:
        # Load a built-in model
        if not ARGS.model:
            raise utils.ArgsErr("model", "either --model or --input must be provided", "None")

        try:
            model_enum = utils.Model[ARGS.model]  # e.g., Model['ENGRO2']
        except KeyError:
            raise utils.ArgsErr("model", "one of Recon/ENGRO2/HMRcore/Custom_model", ARGS.model)

        # Load built-in model (Model.getCOBRAmodel uses tool_dir to locate local models)
        try:
            model = model_enum.getCOBRAmodel(toolDir=ARGS.tool_dir)
        except Exception as e:
            # Wrap/normalize load errors as DataErr for consistency
            raise utils.DataErr(ARGS.model, f"failed loading built-in model: {e}")

    # Apply a non-default medium (ENGRO2 only): overwrite uptake bounds of the exchange reactions
    if ARGS.name == "ENGRO2" and ARGS.medium_selector != "Default":
        df_mediums = pd.read_csv(ARGS.tool_dir + "/local/medium/medium.csv", index_col=0)
        # medium.csv already uses underscores in its column names, so the selector needs no renaming
        medium = df_mediums[[ARGS.medium_selector]]
        medium = medium[ARGS.medium_selector].to_dict()

        # Reset all medium reactions' lower bounds to zero
        for rxn_id, _ in model.medium.items():
            model.reactions.get_by_id(rxn_id).lower_bound = float(0.0)

        # Apply selected medium uptake bounds (negative for uptake)
        for reaction, value in medium.items():
            if value is not None:
                model.reactions.get_by_id(reaction).lower_bound = -float(value)
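    # For example (illustrative reaction id, not necessarily present in every model): a medium
    # value of 10.0 for "EX_glc__D_e" sets that exchange reaction's lower_bound to -10.0,
    # i.e. allows uptake of up to 10 flux units.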

    # Initialize translation_issues dictionary
    translation_issues = {}

    if (ARGS.name == "Recon" or ARGS.name == "ENGRO2") and ARGS.gene_format != "Default":
        logging.basicConfig(level=logging.INFO)
        logger = logging.getLogger(__name__)

        model, translation_issues = modelUtils.translate_model_genes(
            model=model,
            mapping_df=pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
            target_nomenclature=ARGS.gene_format,
            source_nomenclature='HGNC_symbol',
            logger=logger
        )

    if ARGS.input and ARGS.gene_format != "Default":
        logging.basicConfig(level=logging.INFO)
        logger = logging.getLogger(__name__)

        # Take a small, clean sample of gene IDs (skipping placeholders like 0)
        ids_sample = sample_valid_gene_ids(model.genes, limit=10)
        if not ids_sample:
            raise utils.DataErr(
                "Custom_model",
                "No valid gene IDs found (many may be placeholders like 0)."
            )

        # Detect source nomenclature on the sample
        types = []
        for gid in ids_sample:
            try:
                t = modelUtils.gene_type(gid, "Custom_model")
            except Exception as e:
                # Keep it simple: skip problematic IDs
                logger.debug(f"gene_type failed for {gid}: {e}")
                t = None
            if t:
                types.append(t)

        if not types:
            raise utils.DataErr(
                "Custom_model",
                "Could not detect a known gene nomenclature from the sample."
            )

        unique_types = set(types)
        if len(unique_types) > 1:
            raise utils.DataErr(
                "Custom_model",
                "Mixed or inconsistent gene nomenclatures detected. "
                "Please unify them before converting."
            )

        source_nomenclature = types[0]

        # Convert only if needed
        if source_nomenclature != ARGS.gene_format:
            model, translation_issues = modelUtils.translate_model_genes(
                model=model,
                mapping_df=pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
                target_nomenclature=ARGS.gene_format,
                source_nomenclature=source_nomenclature,
                logger=logger
            )

    # Generate the tabular summary using the unified export function
    if not ARGS.out_tabular:
        raise utils.ArgsErr("out_tabular", "output path (--out_tabular) is required for tabular output", ARGS.out_tabular)

    merged = modelUtils.export_model_to_tabular(
        model=model,
        output_path=ARGS.out_tabular,
        translation_issues=translation_issues,
        include_objective=True,
        save_function=save_as_tabular_df
    )
    expected = ARGS.out_tabular

    # Verify that the output exists and is non-empty
    if not expected or not os.path.exists(expected) or os.path.getsize(expected) == 0:
        raise utils.DataErr(expected, "Output not created or empty")

    print("Completed successfully")


if __name__ == '__main__':
    main()