COBRAxy/metabolicModel2Tabular.py @ 491:7a413a5ec566 (draft)
comparison with parent 490:c6ea189ea7e9

author:   francesco_lapi
date:     Mon, 29 Sep 2025 15:34:59 +0000
children: 4ed95023af20
1 """ | |
2 Scripts to generate a tabular file of a metabolic model (built-in or custom). | |
3 | |
4 This script loads a COBRA model (built-in or custom), optionally applies | |
5 medium and gene nomenclature settings, derives reaction-related metadata | |
6 (GPR rules, formulas, bounds, objective coefficients, medium membership, | |
7 and compartments for ENGRO2), and writes a tabular summary. | |
8 """ | |
9 | |
10 import os | |
11 import csv | |
12 import cobra | |
13 import argparse | |
14 import pandas as pd | |
15 import utils.general_utils as utils | |
16 from typing import Optional, Tuple, List | |
17 import utils.model_utils as modelUtils | |
18 import logging | |
19 from pathlib import Path | |
20 | |
21 | |
22 ARGS : argparse.Namespace | |
23 def process_args(args: List[str] = None) -> argparse.Namespace: | |
24 """ | |
25 Parse command-line arguments for metabolic_model_setting. | |
26 """ | |
27 | |
28 parser = argparse.ArgumentParser( | |
29 usage="%(prog)s [options]", | |
30 description="Generate custom data from a given model" | |
31 ) | |
32 | |
33 parser.add_argument("--out_log", type=str, required=True, | |
34 help="Output log file") | |
35 | |
36 parser.add_argument("--model", type=str, | |
37 help="Built-in model identifier (e.g., ENGRO2, Recon, HMRcore)") | |
38 parser.add_argument("--input", type=str, | |
39 help="Custom model file (JSON or XML)") | |
40 parser.add_argument("--name", type=str, required=True, | |
41 help="Model name (default or custom)") | |
42 | |
43 parser.add_argument("--medium_selector", type=str, required=True, | |
44 help="Medium selection option") | |
45 | |
46 parser.add_argument("--gene_format", type=str, default="Default", | |
47 help="Gene nomenclature format: Default (original), ENSNG, HGNC_SYMBOL, HGNC_ID, ENTREZ") | |
48 | |
49 parser.add_argument("--out_tabular", type=str, | |
50 help="Output file for the merged dataset (CSV or XLSX)") | |
51 | |
52 parser.add_argument("--tool_dir", type=str, default=os.path.dirname(__file__), | |
53 help="Tool directory (passed from Galaxy as $__tool_directory__)") | |
54 | |
55 | |
56 return parser.parse_args(args) | |

################################- INPUT DATA LOADING -################################
def load_custom_model(file_path: utils.FilePath, ext: Optional[utils.FileFormat] = None) -> cobra.Model:
    """
    Loads a custom model from a file, in JSON, XML, MAT, or YML format.

    Args:
        file_path : The path to the file containing the custom model.
        ext : explicit file extension. Required within Galaxy, where uploaded
            datasets do not keep their original file extension.

    Raises:
        DataErr : if the file is in an invalid format or cannot be opened for whatever reason.

    Returns:
        cobra.Model : the model, if successfully opened.
    """
    ext = ext if ext else file_path.ext
    try:
        if ext is utils.FileFormat.XML:
            return cobra.io.read_sbml_model(file_path.show())

        if ext is utils.FileFormat.JSON:
            return cobra.io.load_json_model(file_path.show())

        if ext is utils.FileFormat.MAT:
            return cobra.io.load_matlab_model(file_path.show())

        if ext is utils.FileFormat.YML:
            return cobra.io.load_yaml_model(file_path.show())

    except Exception as e:
        raise utils.DataErr(file_path, str(e))
    raise utils.DataErr(
        file_path,
        f"Unrecognized format '{ext}'. Only JSON, XML, MAT, YML are supported."
    )
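
# Minimal usage sketch (hypothetical file). In Galaxy the extension is passed
# explicitly because uploaded datasets do not keep their original suffix:
#
#   fp = utils.FilePath.fromStrPath("my_model.json")
#   model = load_custom_model(fp)                          # infer from the path
#   model = load_custom_model(fp, utils.FileFormat.JSON)   # or state it explicitly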


###############################- FILE SAVING -################################
def save_as_csv_filePath(data: dict, file_path: utils.FilePath, fieldNames: Tuple[str, str]) -> None:
    """
    Saves any dictionary-shaped data in a .csv file created at the given file_path as FilePath.

    Args:
        data : the data to be written to the file.
        file_path : the path to the .csv file.
        fieldNames : the names of the fields (columns) in the .csv file.

    Returns:
        None
    """
    with open(file_path.show(), 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldNames, dialect="excel-tab")
        writer.writeheader()

        for key, value in data.items():
            writer.writerow({fieldNames[0]: key, fieldNames[1]: value})

def save_as_csv(data: dict, file_path: str, fieldNames: Tuple[str, str]) -> None:
    """
    Saves any dictionary-shaped data in a .csv file created at the given file_path as string.

    Args:
        data : the data to be written to the file.
        file_path : the path to the .csv file.
        fieldNames : the names of the fields (columns) in the .csv file.

    Returns:
        None
    """
    with open(file_path, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldNames, dialect="excel-tab")
        writer.writeheader()

        for key, value in data.items():
            writer.writerow({fieldNames[0]: key, fieldNames[1]: value})
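
# Example of the two-column, tab-separated layout both helpers produce (the
# "excel-tab" dialect writes TSV despite the .csv naming); values are
# hypothetical:
#
#   save_as_csv({"R1": "geneA or geneB"}, "rules.csv", ("ReactionID", "GPR"))
#   # rules.csv:
#   # ReactionID<TAB>GPR
#   # R1<TAB>geneA or geneB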

def save_as_tabular_df(df: pd.DataFrame, path: str) -> None:
    """
    Save a pandas DataFrame as a tab-separated file, creating directories as needed.

    Args:
        df: The DataFrame to write.
        path: Destination file path (will be written as TSV).

    Raises:
        DataErr: If writing the output fails for any reason.

    Returns:
        None
    """
    try:
        os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
        df.to_csv(path, sep="\t", index=False)
    except Exception as e:
        raise utils.DataErr(path, f"failed writing tabular output: {e}")
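
# Usage sketch (hypothetical path and frame): parent directories are created
# on demand, so a fresh output location is fine.
#
#   save_as_tabular_df(pd.DataFrame({"ReactionID": ["R1"]}), "out/summary.tsv")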

def is_placeholder(gid) -> bool:
    """Return True if the gene id looks like a placeholder (e.g., 0/NA/NAN/empty)."""
    if gid is None:
        return True
    s = str(gid).strip().lower()
    return s in {"0", "", "na", "nan"}  # lowercase for simple matching

def sample_valid_gene_ids(genes, limit=10):
    """Return up to `limit` valid gene IDs, skipping placeholders (e.g., the leading 0 in Recon)."""
    out = []
    for g in genes:
        gid = getattr(g, "id", getattr(g, "gene_id", g))
        if not is_placeholder(gid):
            out.append(str(gid))
        if len(out) >= limit:
            break
    return out
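
# Illustrative behaviour on plain strings (the helper also accepts cobra Gene
# objects via getattr); the IDs here are hypothetical:
#
#   sample_valid_gene_ids(["0", "nan", "HGNC:5", "HGNC:6"], limit=10)
#   # -> ["HGNC:5", "HGNC:6"]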


###############################- ENTRY POINT -################################
def main(args: Optional[List[str]] = None) -> None:
    """
    Initialize and generate custom data based on the frontend input arguments.

    Returns:
        None
    """
    # Parse args from the frontend (Galaxy XML)
    global ARGS
    ARGS = process_args(args)

    if ARGS.input:
        # Load a custom model from file
        model = load_custom_model(
            utils.FilePath.fromStrPath(ARGS.input), utils.FilePath.fromStrPath(ARGS.name).ext)
    else:
        # Load a built-in model
        try:
            model_enum = utils.Model[ARGS.model]  # e.g., Model['ENGRO2']
        except KeyError:
            raise utils.ArgsErr("model", "one of Recon/ENGRO2/HMRcore/Custom_model", ARGS.model)

        # Load the built-in model (Model.getCOBRAmodel uses tool_dir to locate local models)
        try:
            model = model_enum.getCOBRAmodel(toolDir=ARGS.tool_dir)
        except Exception as e:
            # Wrap/normalize load errors as DataErr for consistency
            raise utils.DataErr(ARGS.model, f"failed loading built-in model: {e}")

    # Determine the final model name: explicit --name overrides, otherwise fall back to the model id
    model_name = ARGS.name if ARGS.name else ARGS.model

    if ARGS.name == "ENGRO2" and ARGS.medium_selector != "Default":
        df_mediums = pd.read_csv(ARGS.tool_dir + "/local/medium/medium.csv", index_col=0)
        ARGS.medium_selector = ARGS.medium_selector.replace("_", " ")
        medium = df_mediums[[ARGS.medium_selector]]
        medium = medium[ARGS.medium_selector].to_dict()

        # Reset the lower bound of every medium reaction to zero
        for rxn_id, _ in model.medium.items():
            model.reactions.get_by_id(rxn_id).lower_bound = float(0.0)

        # Apply the selected medium's uptake bounds (negative for uptake)
        for reaction, value in medium.items():
            if value is not None:
                model.reactions.get_by_id(reaction).lower_bound = -float(value)
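        # Sketch of `medium` at this point (reaction IDs and values are
        # hypothetical): {"EX_glc__D_e": 10.0, "EX_o2_e": None, ...}
        # A value of 10.0 becomes lower_bound = -10.0 (uptake is a negative
        # flux); None leaves the reaction closed by the reset above.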

    # Initialize the translation issues dictionary
    translation_issues = {}

    if (ARGS.name == "Recon" or ARGS.name == "ENGRO2") and ARGS.gene_format != "Default":
        logging.basicConfig(level=logging.INFO)
        logger = logging.getLogger(__name__)

        model, translation_issues = modelUtils.translate_model_genes(
            model=model,
            mapping_df=pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
            target_nomenclature=ARGS.gene_format,
            source_nomenclature='HGNC_symbol',
            logger=logger
        )
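    # Assumed shape of `translation_issues`, which feeds the TranslationIssues
    # column below: a dict keyed by reaction ID, e.g. {"R1": "unmapped gene X"}
    # (the example entry is hypothetical).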

    if ARGS.name == "Custom_model" and ARGS.gene_format != "Default":
        logging.basicConfig(level=logging.INFO)
        logger = logging.getLogger(__name__)

        # Take a small, clean sample of gene IDs (skipping placeholders like 0)
        ids_sample = sample_valid_gene_ids(model.genes, limit=10)
        if not ids_sample:
            raise utils.DataErr(
                "Custom_model",
                "No valid gene IDs found (many may be placeholders like 0)."
            )

        # Detect the source nomenclature on the sample
        types = []
        for gid in ids_sample:
            try:
                t = modelUtils.gene_type(gid, "Custom_model")
            except Exception as e:
                # Keep it simple: skip problematic IDs
                logger.debug(f"gene_type failed for {gid}: {e}")
                t = None
            if t:
                types.append(t)

        if not types:
            raise utils.DataErr(
                "Custom_model",
                "Could not detect a known gene nomenclature from the sample."
            )

        unique_types = set(types)
        if len(unique_types) > 1:
            raise utils.DataErr(
                "Custom_model",
                "Mixed or inconsistent gene nomenclatures detected. "
                "Please unify them before converting."
            )

        source_nomenclature = types[0]

        # Convert only if needed
        if source_nomenclature != ARGS.gene_format:
            model, translation_issues = modelUtils.translate_model_genes(
                model=model,
                mapping_df=pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
                target_nomenclature=ARGS.gene_format,
                source_nomenclature=source_nomenclature,
                logger=logger
            )

    # Generate data
    rules = modelUtils.generate_rules(model, asParsed=False)
    reactions = modelUtils.generate_reactions(model, asParsed=False)
    bounds = modelUtils.generate_bounds(model)
    medium = modelUtils.get_medium(model)
    objective_function = modelUtils.extract_objective_coefficients(model)

    if ARGS.name == "ENGRO2":
        compartments = modelUtils.generate_compartments(model)

    df_rules = pd.DataFrame(list(rules.items()), columns=["ReactionID", "GPR"])
    df_reactions = pd.DataFrame(list(reactions.items()), columns=["ReactionID", "Formula"])

    # Create a DataFrame for the translation issues
    df_translation_issues = pd.DataFrame([
        {"ReactionID": rxn_id, "TranslationIssues": issues}
        for rxn_id, issues in translation_issues.items()
    ])

    df_bounds = bounds.reset_index().rename(columns={"index": "ReactionID"})
    df_medium = medium.rename(columns={"reaction": "ReactionID"})
    df_medium["InMedium"] = True

    merged = df_reactions.merge(df_rules, on="ReactionID", how="outer")
    merged = merged.merge(df_bounds, on="ReactionID", how="outer")
    merged = merged.merge(objective_function, on="ReactionID", how="outer")
    if ARGS.name == "ENGRO2":
        merged = merged.merge(compartments, on="ReactionID", how="outer")
    merged = merged.merge(df_medium, on="ReactionID", how="left")

    # Add the translation issues column
    if not df_translation_issues.empty:
        merged = merged.merge(df_translation_issues, on="ReactionID", how="left")
        merged["TranslationIssues"] = merged["TranslationIssues"].fillna("")
    else:
        # Add an empty TranslationIssues column if no issues were found
        merged["TranslationIssues"] = ""

    merged["InMedium"] = merged["InMedium"].fillna(False)

    merged = merged.sort_values(by="InMedium", ascending=False)
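    # At this point `merged` holds one row per reaction: ReactionID, Formula,
    # GPR, the bounds columns, objective coefficients, compartment info for
    # ENGRO2, InMedium, and TranslationIssues (exact names of the bounds,
    # objective, and compartment columns depend on the modelUtils helpers).
    # Medium reactions are sorted to the top.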

    if not ARGS.out_tabular:
        raise utils.ArgsErr("out_tabular", "output path (--out_tabular) is required for tabular output", ARGS.out_tabular)
    save_as_tabular_df(merged, ARGS.out_tabular)
    expected = ARGS.out_tabular

    # Verify that the output exists and is non-empty
    if not expected or not os.path.exists(expected) or os.path.getsize(expected) == 0:
        raise utils.DataErr(expected, "Output not created or empty")

    print("Metabolic_model_setting: completed successfully")

if __name__ == '__main__':
    main()