import argparse
import utils.general_utils as utils
from typing import Optional, Dict, Set, List, Tuple, Union
import os
import numpy as np
import pandas as pd
import cobra
from cobra import Model, Reaction, Metabolite
import re
import sys
import csv
from joblib import Parallel, delayed, cpu_count
import utils.rule_parsing as rulesUtils
import utils.reaction_parsing as reactionUtils  # provides create_reaction_dict(); assumed module path, needed by generate_reactions(asParsed=True)

# , medium

################################# process args ###############################
def process_args(args :List[str] = None) -> argparse.Namespace:
    """
    Processes command-line arguments.

    Args:
        args (list): List of command-line arguments.

    Returns:
        Namespace: An object containing parsed arguments.
    """
    parser = argparse.ArgumentParser(usage = '%(prog)s [options]',
                                     description = 'process some values')

    parser.add_argument("-mo", "--model_upload", type = str,
                        help = "path to input file with custom rules, if provided")

    parser.add_argument('-ol', '--out_log',
                        help = "Output log")

    parser.add_argument('-td', '--tool_dir',
                        type = str,
                        required = True,
                        help = 'your tool directory')

    parser.add_argument('-ir', '--input_ras',
                        type = str,
                        required = False,
                        help = 'input ras')

    parser.add_argument('-rn', '--name',
                        type = str,
                        help = 'ras class names')

    parser.add_argument('-rs', '--ras_selector',
                        required = True,
                        type = utils.Bool("using_RAS"),
                        help = 'ras selector')

    parser.add_argument('-cc', '--cell_class',
                        type = str,
                        help = 'output of cell class')

    parser.add_argument('-idop', '--output_path',
                        type = str,
                        default = 'ras_to_bounds/',
                        help = 'output path for maps')

    parser.add_argument('-sm', '--save_models',
                        type = utils.Bool("save_models"),
                        default = False,
                        help = 'whether to save models with applied bounds')

    parser.add_argument('-smp', '--save_models_path',
                        type = str,
                        default = 'saved_models/',
                        help = 'output path for saved models')

    parser.add_argument('-smf', '--save_models_format',
                        type = str,
                        default = 'csv',
                        help = 'format for saved models (csv, xml, json, mat, yaml, tabular)')

    ARGS = parser.parse_args(args)
    return ARGS

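# Illustrative command line (the script name and file names below are placeholders,
# and the exact boolean string accepted by -rs/-sm depends on utils.Bool):
#
#   python ras_to_bounds.py -td . -mo ENGRO2_rules.tsv -rs true \
#       -ir ras_classA.tsv,ras_classB.tsv -rn classA.tsv,classB.tsv \
#       -cc cell_class.tsv -idop ras_to_bounds/ -ol log.txt
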
########################### warning ###########################################
def warning(s :str) -> None:
    """
    Log a warning message to an output log file and print it to the console.

    Args:
        s (str): The warning message to be logged and printed.

    Returns:
        None
    """
    if ARGS.out_log:
        with open(ARGS.out_log, 'a') as log:
            log.write(s + "\n\n")
    print(s)

############################ dataset input ####################################
def read_dataset(data :str, name :str) -> pd.DataFrame:
    """
    Read a dataset from a tab-separated (CSV/TSV) file and return it as a pandas DataFrame.

    Args:
        data (str): Path to the file containing the dataset.
        name (str): Name of the dataset, used in error messages.

    Returns:
        pandas.DataFrame: DataFrame containing the dataset.

    Raises:
        pd.errors.EmptyDataError: If the file is empty.
        sys.exit: If the file has the wrong format, the execution is aborted.
    """
    try:
        dataset = pd.read_csv(data, sep = '\t', header = 0, engine = 'python')
    except pd.errors.EmptyDataError:
        sys.exit('Execution aborted: wrong format of ' + name + '\n')
    if len(dataset.columns) < 2:
        sys.exit('Execution aborted: wrong format of ' + name + '\n')
    return dataset


def apply_ras_bounds(bounds, ras_row):
    """
    Adjust the bounds of reactions in the model based on RAS values.

    Args:
        bounds (pd.DataFrame): Model bounds.
        ras_row (pd.Series): A row from a RAS DataFrame containing scaling factors for reaction bounds.

    Returns:
        new_bounds (pd.DataFrame): integrated bounds.
    """
    new_bounds = bounds.copy()
    for reaction in ras_row.index:
        scaling_factor = ras_row[reaction]
        if not np.isnan(scaling_factor):
            lower_bound = bounds.loc[reaction, "lower_bound"]
            upper_bound = bounds.loc[reaction, "upper_bound"]
            valMax = float(upper_bound * scaling_factor)
            valMin = float(lower_bound * scaling_factor)
            if upper_bound != 0 and lower_bound == 0:
                new_bounds.loc[reaction, "upper_bound"] = valMax
            if upper_bound == 0 and lower_bound != 0:
                new_bounds.loc[reaction, "lower_bound"] = valMin
            if upper_bound != 0 and lower_bound != 0:
                new_bounds.loc[reaction, "lower_bound"] = valMin
                new_bounds.loc[reaction, "upper_bound"] = valMax
    return new_bounds

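# Worked example (illustrative values only): with a RAS scaling factor of 0.5,
#   a reversible reaction with bounds (-1000, 1000) becomes (-500, 500),
#   a forward-only reaction with bounds (0, 1000) becomes (0, 500),
#   a backward-only reaction with bounds (-1000, 0) becomes (-500, 0),
# and reactions whose RAS value is NaN keep their original bounds.
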
################################- DATA GENERATION -################################
ReactionId = str
def generate_rules(model: cobra.Model, *, asParsed = True) -> Union[Dict[ReactionId, rulesUtils.OpList], Dict[ReactionId, str]]:
    """
    Generates a dictionary mapping reaction ids to rules from the model.

    Args:
        model : the model to derive data from.
        asParsed : if True parses the rules to an optimized runtime format, otherwise leaves them as strings.

    Returns:
        Dict[ReactionId, rulesUtils.OpList] : the generated dictionary of parsed rules.
        Dict[ReactionId, str] : the generated dictionary of raw rules.
    """
    # Is the below approach convoluted? yes
    # Ok but is it inefficient? probably
    # Ok but at least I don't have to repeat the check at every rule (I'm clinically insane)
    _ruleGetter = lambda reaction : reaction.gene_reaction_rule
    ruleExtractor = (lambda reaction :
        rulesUtils.parseRuleToNestedList(_ruleGetter(reaction))) if asParsed else _ruleGetter

    return {
        reaction.id : ruleExtractor(reaction)
        for reaction in model.reactions
        if reaction.gene_reaction_rule }

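# Example (illustrative, reaction ids and gene ids are made up): with asParsed=False
# the result maps each reaction id to its raw GPR string, e.g.
#   {"R_HEX1": "HGNC:4922 or HGNC:4925", "R_PFK": "HGNC:8877"}
# With asParsed=True each string is instead parsed by rulesUtils.parseRuleToNestedList
# into its nested operator representation (OpList).
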
def generate_reactions(model :cobra.Model, *, asParsed = True) -> Dict[ReactionId, str]:
    """
    Generates a dictionary mapping reaction ids to reaction formulas from the model.

    Args:
        model : the model to derive data from.
        asParsed : if True parses the reactions to an optimized runtime format, otherwise leaves them as they are.

    Returns:
        Dict[ReactionId, str] : the generated dictionary.
    """

    unparsedReactions = {
        reaction.id : reaction.reaction
        for reaction in model.reactions
        if reaction.reaction
    }

    if not asParsed: return unparsedReactions

    return reactionUtils.create_reaction_dict(unparsedReactions)

def get_medium(model:cobra.Model) -> pd.DataFrame:
    """
    Return the growth medium of the model as a DataFrame: the reactions that
    have no positive stoichiometric coefficients (no products) and a negative
    lower bound, i.e. reactions that can only carry uptake flux.
    """
    trueMedium = []
    for r in model.reactions:
        positiveCoeff = 0
        for m in r.metabolites:
            if r.get_coefficient(m.id) > 0:
                positiveCoeff = 1
        if positiveCoeff == 0 and r.lower_bound < 0:
            trueMedium.append(r.id)

    df_medium = pd.DataFrame()
    df_medium["reaction"] = trueMedium
    return df_medium

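# Example (illustrative): an exchange reaction written as "glc__D_e <=>" with
# bounds (-10, 1000) has no positive coefficients and a negative lower bound,
# so its id (e.g. "EX_glc__D_e") would appear in the returned "reaction" column.
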
def generate_bounds(model:cobra.Model) -> pd.DataFrame:
    """
    Build a DataFrame of the current lower and upper bounds of every reaction
    in the model, indexed by reaction id.
    """
    rxns = []
    for reaction in model.reactions:
        rxns.append(reaction.id)

    bounds = pd.DataFrame(columns = ["lower_bound", "upper_bound"], index = rxns)

    for reaction in model.reactions:
        bounds.loc[reaction.id] = [reaction.lower_bound, reaction.upper_bound]
    return bounds


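# Example output (illustrative reaction ids and values):
#            lower_bound  upper_bound
#   R_HEX1           0.0       1000.0
#   R_PGI        -1000.0       1000.0
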
def generate_compartments(model: cobra.Model) -> pd.DataFrame:
    """
    Generates a DataFrame containing pathway information for each reaction,
    taken from the 'pathways' entry of the reaction annotation.
    Creates one column per pathway position (Pathway_1, Pathway_2, etc.)

    Args:
        model: the COBRA model to extract pathway data from.

    Returns:
        pd.DataFrame: DataFrame with ReactionID and pathway columns
    """
    pathway_data = []

    # First pass: determine the maximum number of pathways any reaction has
    max_pathways = 0
    reaction_pathways = {}

    for reaction in model.reactions:
        # Get the pathway annotation of the reaction (either a list or a single value)
        if isinstance(reaction.annotation['pathways'], list):
            reaction_pathways[reaction.id] = reaction.annotation['pathways']
            max_pathways = max(max_pathways, len(reaction.annotation['pathways']))
        else:
            reaction_pathways[reaction.id] = [reaction.annotation['pathways']]
            max_pathways = max(max_pathways, 1)  # a single (non-list) annotation still fills one column

    # Create column names for pathways
    pathway_columns = [f"Pathway_{i+1}" for i in range(max_pathways)]

    # Second pass: create the data
    for reaction_id, pathways in reaction_pathways.items():
        row = {"ReactionID": reaction_id}

        # Fill pathway columns
        for i in range(max_pathways):
            col_name = pathway_columns[i]
            if i < len(pathways):
                row[col_name] = pathways[i]
            else:
                row[col_name] = None  # or "" if you prefer empty strings

        pathway_data.append(row)

    return pd.DataFrame(pathway_data)

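# Example output (illustrative pathway names):
#   ReactionID   Pathway_1     Pathway_2
#   R_HEX1       Glycolysis    None
#   R_G6PDH2r    Glycolysis    Pentose phosphate pathway
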
def save_model(model, filename, output_folder, file_format='csv'):
    """
    Save a COBRA model to file in the specified format.

    Args:
        model (cobra.Model): The model to save.
        filename (str): Base filename (without extension).
        output_folder (str): Output directory.
        file_format (str): File format ('xml', 'json', 'mat', 'yaml', 'tabular', 'csv').

    Returns:
        None
    """
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    try:
        if file_format in ('tabular', 'csv'):
            # Special handling for tabular format using utils functions
            filepath = os.path.join(output_folder, f"{filename}.csv")

            rules = generate_rules(model, asParsed = False)
            reactions = generate_reactions(model, asParsed = False)
            bounds = generate_bounds(model)
            medium = get_medium(model)

            try:
                compartments = utils.generate_compartments(model)
            except Exception:
                compartments = None

            df_rules = pd.DataFrame(list(rules.items()), columns = ["ReactionID", "Rule"])
            df_reactions = pd.DataFrame(list(reactions.items()), columns = ["ReactionID", "Reaction"])
            df_bounds = bounds.reset_index().rename(columns = {"index": "ReactionID"})
            df_medium = medium.rename(columns = {"reaction": "ReactionID"})
            df_medium["InMedium"] = True  # flag marking presence in the medium

            merged = df_reactions.merge(df_rules, on = "ReactionID", how = "outer")
            merged = merged.merge(df_bounds, on = "ReactionID", how = "outer")

            # Add compartments only if they exist and model name is ENGRO2
            if compartments is not None and hasattr(ARGS, 'name') and ARGS.name == "ENGRO2":
                merged = merged.merge(compartments, on = "ReactionID", how = "outer")

            merged = merged.merge(df_medium, on = "ReactionID", how = "left")
            merged["InMedium"] = merged["InMedium"].fillna(False)
            merged = merged.sort_values(by = "InMedium", ascending = False)

            merged.to_csv(filepath, sep = "\t", index = False)

        else:
            # Standard COBRA formats
            filepath = os.path.join(output_folder, f"{filename}.{file_format}")

            if file_format == 'xml':
                cobra.io.write_sbml_model(model, filepath)
            elif file_format == 'json':
                cobra.io.save_json_model(model, filepath)
            elif file_format == 'mat':
                cobra.io.save_matlab_model(model, filepath)
            elif file_format == 'yaml':
                cobra.io.save_yaml_model(model, filepath)
            else:
                raise ValueError(f"Unsupported format: {file_format}")

        print(f"Model saved: {filepath}")

    except Exception as e:
        warning(f"Error saving model {filename}: {str(e)}")

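# Illustrative usage (model and folder names are placeholders):
#   save_model(model, "patient_01", "saved_models/", file_format="csv")
# writes saved_models/patient_01.csv as a tab-separated table with ReactionID,
# Reaction, Rule, lower_bound, upper_bound and InMedium columns, whereas
# file_format="xml" would write an SBML file via cobra.io.write_sbml_model.
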
def apply_bounds_to_model(model, bounds):
    """
    Apply bounds from a DataFrame to a COBRA model.

    Args:
        model (cobra.Model): The metabolic model to modify.
        bounds (pd.DataFrame): DataFrame with reaction bounds.

    Returns:
        cobra.Model: Modified model with new bounds.
    """
    model_copy = model.copy()
    for reaction_id in bounds.index:
        try:
            reaction = model_copy.reactions.get_by_id(reaction_id)
            reaction.lower_bound = bounds.loc[reaction_id, "lower_bound"]
            reaction.upper_bound = bounds.loc[reaction_id, "upper_bound"]
        except KeyError:
            # Reaction not found in model, skip
            continue
    return model_copy

def process_ras_cell(cellName, ras_row, model, rxns_ids, output_folder, save_models=False, save_models_path='saved_models/', save_models_format='csv'):
    """
    Process a single RAS cell, apply bounds, and save the bounds to a CSV file.

    Args:
        cellName (str): The name of the RAS cell (used for naming the output file).
        ras_row (pd.Series): A row from a RAS DataFrame containing scaling factors for reaction bounds.
        model (cobra.Model): The metabolic model to be modified.
        rxns_ids (list of str): List of reaction IDs to which the scaling factors will be applied.
        output_folder (str): Folder path where the output CSV file will be saved.
        save_models (bool): Whether to save models with applied bounds.
        save_models_path (str): Path where to save models.
        save_models_format (str): Format for saved models.

    Returns:
        None
    """
    bounds = pd.DataFrame([(rxn.lower_bound, rxn.upper_bound) for rxn in model.reactions], index=rxns_ids, columns=["lower_bound", "upper_bound"])
    new_bounds = apply_ras_bounds(bounds, ras_row)
    new_bounds.to_csv(output_folder + cellName + ".csv", sep='\t', index=True)

    # Save model if requested
    if save_models:
        modified_model = apply_bounds_to_model(model, new_bounds)
        save_model(modified_model, cellName, save_models_path, save_models_format)

def generate_bounds_model(model: cobra.Model, ras=None, output_folder='output/', save_models=False, save_models_path='saved_models/', save_models_format='csv') -> None:
    """
    Generate reaction bounds for a metabolic model based on medium conditions and optional RAS adjustments,
    and write them as CSV files to the output folder.

    Args:
        model (cobra.Model): The metabolic model for which bounds will be generated.
        ras (pd.DataFrame, optional): RAS pandas dataframe. Defaults to None.
        output_folder (str, optional): Folder path where output CSV files will be saved. Defaults to 'output/'.
        save_models (bool): Whether to save models with applied bounds.
        save_models_path (str): Path where to save models.
        save_models_format (str): Format for saved models.

    Returns:
        None
    """
    rxns_ids = [rxn.id for rxn in model.reactions]

    # Perform Flux Variability Analysis (FVA) on this medium
    df_FVA = cobra.flux_analysis.flux_variability_analysis(model, fraction_of_optimum=0, processes=1).round(8)

    # Set FVA bounds
    for reaction in rxns_ids:
        model.reactions.get_by_id(reaction).lower_bound = float(df_FVA.loc[reaction, "minimum"])
        model.reactions.get_by_id(reaction).upper_bound = float(df_FVA.loc[reaction, "maximum"])

    if ras is not None:
        Parallel(n_jobs=cpu_count())(delayed(process_ras_cell)(
            cellName, ras_row, model, rxns_ids, output_folder,
            save_models, save_models_path, save_models_format
        ) for cellName, ras_row in ras.iterrows())
    else:
        bounds = pd.DataFrame([(rxn.lower_bound, rxn.upper_bound) for rxn in model.reactions], index=rxns_ids, columns=["lower_bound", "upper_bound"])
        newBounds = apply_ras_bounds(bounds, pd.Series([1]*len(rxns_ids), index=rxns_ids))
        newBounds.to_csv(output_folder + "bounds.csv", sep='\t', index=True)

        # Save model if requested
        if save_models:
            modified_model = apply_bounds_to_model(model, newBounds)
            save_model(modified_model, "model_with_bounds", save_models_path, save_models_format)

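# Illustrative usage (names and values are placeholders, not part of the tool wrapper):
#   ras = pd.DataFrame({"R_HEX1": [0.5, 1.0], "R_PGI": [np.nan, 0.2]},
#                      index=["patient_01", "patient_02"])
#   generate_bounds_model(model, ras=ras, output_folder="ras_to_bounds/")
# would run FVA once on `model`, then write ras_to_bounds/patient_01.csv and
# ras_to_bounds/patient_02.csv, each containing the FVA bounds scaled by that
# patient's RAS values.
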
############################# main ###########################################
def main(args:List[str] = None) -> None:
    """
    Initializes everything and sets the program in motion based on the frontend input arguments.

    Returns:
        None
    """
    if not os.path.exists('ras_to_bounds'):
        os.makedirs('ras_to_bounds')

    global ARGS
    ARGS = process_args(args)

    if ARGS.ras_selector:
        ras_file_list = ARGS.input_ras.split(",")
        ras_file_names = ARGS.name.split(",")
        if len(ras_file_names) != len(set(ras_file_names)):
            error_message = "Duplicated file names in the uploaded RAS matrices."
            warning(error_message)
            raise ValueError(error_message)
        ras_class_names = []
        for file in ras_file_names:
            ras_class_names.append(file.rsplit(".", 1)[0])
        ras_list = []
        class_assignments = pd.DataFrame(columns=["Patient_ID", "Class"])
        for ras_matrix, ras_class_name in zip(ras_file_list, ras_class_names):
            ras = read_dataset(ras_matrix, "ras dataset")
            ras.replace("None", None, inplace=True)
            ras.set_index("Reactions", drop=True, inplace=True)
            ras = ras.T
            ras = ras.astype(float)
            if len(ras_file_list) > 1:
                # append class name to patient id (dataframe index)
                ras.index = [f"{idx}_{ras_class_name}" for idx in ras.index]
            else:
                ras.index = [f"{idx}" for idx in ras.index]
            ras_list.append(ras)
            for patient_id in ras.index:
                class_assignments.loc[class_assignments.shape[0]] = [patient_id, ras_class_name]

        # Concatenate all ras DataFrames into a single DataFrame
        ras_combined = pd.concat(ras_list, axis=0)
        # Normalize the RAS values by max RAS
        ras_combined = ras_combined.div(ras_combined.max(axis=0))
        ras_combined.dropna(axis=1, how='all', inplace=True)

    model = utils.build_cobra_model_from_csv(ARGS.model_upload)

    validation = utils.validate_model(model)

    print("\n=== MODEL VALIDATION ===")
    for key, value in validation.items():
        print(f"{key}: {value}")

    if ARGS.ras_selector:
        generate_bounds_model(model, ras=ras_combined, output_folder=ARGS.output_path,
                              save_models=ARGS.save_models, save_models_path=ARGS.save_models_path,
                              save_models_format=ARGS.save_models_format)
        class_assignments.to_csv(ARGS.cell_class, sep='\t', index=False)
    else:
        generate_bounds_model(model, output_folder=ARGS.output_path,
                              save_models=ARGS.save_models, save_models_path=ARGS.save_models_path,
                              save_models_format=ARGS.save_models_format)

##############################################################################
if __name__ == "__main__":
    main()