406
|
1 import argparse
|
|
2 import utils.general_utils as utils
|
416
|
3 from typing import Optional, Dict, Set, List, Tuple, Union
|
406
|
4 import os
|
|
5 import numpy as np
|
|
6 import pandas as pd
|
|
7 import cobra
|
407
|
8 from cobra import Model, Reaction, Metabolite
|
|
9 import re
|
406
|
10 import sys
|
|
11 import csv
|
|
12 from joblib import Parallel, delayed, cpu_count
|
414
|
13 import utils.rule_parsing as rulesUtils
|
416
|
14 import utils.reaction_utils as reactionUtils
|
406
|
15
|
407
|
16 # , medium
|
|
17
|
406
|
18 ################################# process args ###############################
|
|
def process_args(args: List[str] = None) -> argparse.Namespace:
    """
    Parse the command-line arguments for this tool.

    Args:
        args: Optional argument list to parse; defaults to sys.argv when None.

    Returns:
        argparse.Namespace: The parsed arguments.
    """
    parser = argparse.ArgumentParser(usage = '%(prog)s [options]',
                                     description = 'process some value\'s')

    parser.add_argument("-mo", "--model_upload", type = str,
                        help = "path to input file with custom rules, if provided")

    parser.add_argument('-ol', '--out_log',
                        help = "Output log")

    parser.add_argument('-td', '--tool_dir',
                        type = str,
                        required = True,
                        help = 'your tool directory')

    parser.add_argument('-ir', '--input_ras',
                        type = str,
                        required = False,
                        help = 'input ras')

    parser.add_argument('-rn', '--name',
                        type = str,
                        help = 'ras class names')

    parser.add_argument('-rs', '--ras_selector',
                        required = True,
                        type = utils.Bool("using_RAS"),
                        help = 'ras selector')

    parser.add_argument('-cc', '--cell_class',
                        type = str,
                        help = 'output of cell class')

    parser.add_argument('-idop', '--output_path',
                        type = str,
                        default = 'ras_to_bounds/',
                        help = 'output path for maps')

    parser.add_argument('-sm', '--save_models',
                        type = utils.Bool("save_models"),
                        default = False,
                        help = 'whether to save models with applied bounds')

    parser.add_argument('-smp', '--save_models_path',
                        type = str,
                        default = 'saved_models/',
                        help = 'output path for saved models')

    parser.add_argument('-smf', '--save_models_format',
                        type = str,
                        default = 'csv',
                        help = 'format for saved models (csv, xml, json, mat, yaml, tabular)')

    return parser.parse_args(args)
|
|
85
|
|
86 ########################### warning ###########################################
|
|
def warning(s: str) -> None:
    """
    Record a warning in the configured log file (if any) and echo it to stdout.

    Args:
        s: The warning text to record.

    Returns:
        None
    """
    log_path = ARGS.out_log
    if log_path:
        with open(log_path, 'a') as handle:
            handle.write(f"{s}\n\n")
    print(s)
|
|
101
|
|
102 ############################ dataset input ####################################
|
|
def read_dataset(data: str, name: str) -> pd.DataFrame:
    """
    Load a tab-separated dataset into a pandas DataFrame.

    Args:
        data (str): Path to the tab-separated file.
        name (str): Human-readable dataset name, used in error messages.

    Returns:
        pandas.DataFrame: The loaded table.

    Raises:
        SystemExit: if the file is empty or has fewer than two columns.
    """
    try:
        frame = pd.read_csv(data, sep = '\t', header = 0, engine='python')
    except pd.errors.EmptyDataError:
        sys.exit('Execution aborted: wrong format of ' + name + '\n')
    # A valid dataset needs an index column plus at least one value column.
    if len(frame.columns) < 2:
        sys.exit('Execution aborted: wrong format of ' + name + '\n')
    return frame
|
|
125
|
|
126
|
|
def apply_ras_bounds(bounds, ras_row):
    """
    Scale reaction bounds by the per-reaction factors of one RAS row.

    Args:
        bounds (pd.DataFrame): Model bounds ("lower_bound"/"upper_bound" columns).
        ras_row (pd.Series): Scaling factor per reaction id; NaN entries are skipped.

    Returns:
        new_bounds (pd.DataFrame): a scaled copy of *bounds*.
    """
    scaled = bounds.copy()
    for rxn_id, factor in ras_row.items():
        if np.isnan(factor):
            continue
        lb = bounds.loc[rxn_id, "lower_bound"]
        ub = bounds.loc[rxn_id, "upper_bound"]
        # A zero bound scaled by any factor stays zero, so only nonzero
        # bounds need to be rewritten.
        if ub != 0:
            scaled.loc[rxn_id, "upper_bound"] = float(ub * factor)
        if lb != 0:
            scaled.loc[rxn_id, "lower_bound"] = float(lb * factor)
    return scaled
|
|
153
|
414
|
154 ################################- DATA GENERATION -################################
|
|
# Type alias: reaction identifiers are plain strings.
ReactionId = str
|
|
def generate_rules(model: cobra.Model, *, asParsed = True) -> Union[Dict[ReactionId, rulesUtils.OpList], Dict[ReactionId, str]]:
    """
    Map each reaction id in the model to its gene-reaction rule.

    Args:
        model : the model to derive data from.
        asParsed : when True the raw rule strings are parsed into the optimized
            runtime format; when False they are returned verbatim.

    Returns:
        Dict[ReactionId, rulesUtils.OpList] : parsed rules (asParsed = True).
        Dict[ReactionId, str] : raw rule strings (asParsed = False).
    """
    rules = {}
    for reaction in model.reactions:
        raw_rule = reaction.gene_reaction_rule
        if not raw_rule:
            continue  # reactions without a rule are omitted entirely
        rules[reaction.id] = rulesUtils.parseRuleToNestedList(raw_rule) if asParsed else raw_rule
    return rules
|
|
179
|
|
def generate_reactions(model :cobra.Model, *, asParsed = True) -> Dict[ReactionId, str]:
    """
    Map each reaction id in the model to its reaction formula string.

    Args:
        model : the model to derive data from.
        asParsed : when True the formulas are converted via
            reactionUtils.create_reaction_dict into the optimized runtime
            format; when False the raw formula strings are returned.

    Returns:
        Dict[ReactionId, str] : the generated dictionary.
    """
    raw_reactions = {}
    for rxn in model.reactions:
        formula = rxn.reaction
        if formula:  # reactions without a formula are omitted
            raw_reactions[rxn.id] = formula

    if asParsed:
        return reactionUtils.create_reaction_dict(raw_reactions)
    return raw_reactions
|
|
201
|
|
def get_medium(model:cobra.Model) -> pd.DataFrame:
    """
    Collect the ids of reactions that act as medium uptakes.

    A reaction qualifies when none of its metabolite coefficients is positive
    (it only consumes) and its lower bound is negative (uptake is allowed).

    Args:
        model : the model to inspect.

    Returns:
        pd.DataFrame: single-column frame ("reaction") listing the medium ids.
    """
    medium_ids = []
    for rxn in model.reactions:
        produces_something = any(rxn.get_coefficient(met.id) > 0 for met in rxn.metabolites)
        if not produces_something and rxn.lower_bound < 0:
            medium_ids.append(rxn.id)

    df_medium = pd.DataFrame()
    df_medium["reaction"] = medium_ids
    return df_medium
|
|
215
|
|
def generate_bounds(model:cobra.Model) -> pd.DataFrame:
    """
    Build a DataFrame with the current lower/upper bounds of every reaction.

    Args:
        model : the model whose bounds are read.

    Returns:
        pd.DataFrame: indexed by reaction id, with "lower_bound" and
        "upper_bound" columns.
    """
    reaction_ids = [rxn.id for rxn in model.reactions]

    table = pd.DataFrame(columns = ["lower_bound", "upper_bound"], index=reaction_ids)
    for rxn in model.reactions:
        table.loc[rxn.id] = [rxn.lower_bound, rxn.upper_bound]
    return table
|
|
227
|
|
228
|
|
229
|
|
def generate_compartments(model: cobra.Model) -> pd.DataFrame:
    """
    Generates a DataFrame mapping each reaction to its pathway annotations.
    Creates one column per pathway position (Pathway_1, Pathway_2, etc.)

    Args:
        model: the COBRA model to extract pathway data from.

    Returns:
        pd.DataFrame: DataFrame with a ReactionID column plus Pathway_N columns.

    Raises:
        KeyError: if a reaction has no 'pathways' annotation (callers catch
        this to detect models without pathway data).
    """
    pathway_data = []

    # First pass: normalize every annotation to a list and track the widest one.
    max_pathways = 0
    reaction_pathways = {}

    for reaction in model.reactions:
        pathways = reaction.annotation['pathways']
        if isinstance(pathways, list):
            reaction_pathways[reaction.id] = pathways
            max_pathways = max(max_pathways, len(pathways))
        else:
            # Single (non-list) annotation: wrap it AND count it. Previously
            # this branch did not update max_pathways, so a model where every
            # reaction has a scalar annotation produced no Pathway columns at
            # all and the values were silently dropped.
            reaction_pathways[reaction.id] = [pathways]
            max_pathways = max(max_pathways, 1)

    # Create column names for pathways
    pathway_columns = [f"Pathway_{i+1}" for i in range(max_pathways)]

    # Second pass: one row per reaction, padding missing positions with None.
    for reaction_id, pathways in reaction_pathways.items():
        row = {"ReactionID": reaction_id}
        for i, col_name in enumerate(pathway_columns):
            row[col_name] = pathways[i] if i < len(pathways) else None
        pathway_data.append(row)

    return pd.DataFrame(pathway_data)
|
|
273
|
411
|
def save_model(model, filename, output_folder, file_format='csv'):
    """
    Save a COBRA model to file in the specified format.

    Args:
        model (cobra.Model): The model to save.
        filename (str): Base filename (without extension).
        output_folder (str): Output directory, created if missing.
        file_format (str): File format ('xml', 'json', 'mat', 'yaml', 'tabular', 'csv').

    Returns:
        None
    """
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    try:
        if file_format in ('tabular', 'csv'):
            # Tabular/CSV export: merge rules, reactions, bounds and medium
            # membership into one tab-separated table.
            # FIX: the output path previously hard-coded a literal placeholder
            # instead of using the `filename` parameter, so every model
            # overwrote the same file.
            filepath = os.path.join(output_folder, f"{filename}.csv")

            rules = generate_rules(model, asParsed = False)
            reactions = generate_reactions(model, asParsed = False)
            bounds = generate_bounds(model)
            medium = get_medium(model)

            try:
                # NOTE(review): this calls utils.generate_compartments, not the
                # local generate_compartments in this module — confirm which
                # implementation is intended.
                compartments = utils.generate_compartments(model)
            except Exception:
                # Models without pathway annotations are still saved, just
                # without compartment columns.
                compartments = None

            df_rules = pd.DataFrame(list(rules.items()), columns = ["ReactionID", "Rule"])
            df_reactions = pd.DataFrame(list(reactions.items()), columns = ["ReactionID", "Reaction"])
            df_bounds = bounds.reset_index().rename(columns = {"index": "ReactionID"})
            df_medium = medium.rename(columns = {"reaction": "ReactionID"})
            df_medium["InMedium"] = True  # flag marking membership in the medium

            merged = df_reactions.merge(df_rules, on = "ReactionID", how = "outer")
            merged = merged.merge(df_bounds, on = "ReactionID", how = "outer")

            # Add compartments only if they exist and model name is ENGRO2
            if compartments is not None and hasattr(ARGS, 'name') and ARGS.name == "ENGRO2":
                merged = merged.merge(compartments, on = "ReactionID", how = "outer")

            merged = merged.merge(df_medium, on = "ReactionID", how = "left")
            merged["InMedium"] = merged["InMedium"].fillna(False)
            # Medium reactions first, for readability of the output table.
            merged = merged.sort_values(by = "InMedium", ascending = False)

            merged.to_csv(filepath, sep="\t", index=False)

        else:
            # Standard COBRA serialization formats.
            filepath = os.path.join(output_folder, f"{filename}.{file_format}")

            if file_format == 'xml':
                cobra.io.write_sbml_model(model, filepath)
            elif file_format == 'json':
                cobra.io.save_json_model(model, filepath)
            elif file_format == 'mat':
                cobra.io.save_matlab_model(model, filepath)
            elif file_format == 'yaml':
                cobra.io.save_yaml_model(model, filepath)
            else:
                raise ValueError(f"Unsupported format: {file_format}")

        print(f"Model saved: {filepath}")

    except Exception as e:
        # Best-effort: saving failures are reported but never abort the run.
        warning(f"Error saving model {filename}: {str(e)}")
|
|
343
|
|
def apply_bounds_to_model(model, bounds):
    """
    Copy a COBRA model and overwrite its reaction bounds from a DataFrame.

    Args:
        model (cobra.Model): The metabolic model to clone and modify.
        bounds (pd.DataFrame): Bounds indexed by reaction id, with
            "lower_bound" and "upper_bound" columns.

    Returns:
        cobra.Model: A modified copy of the model; ids absent from the model
        are silently skipped.
    """
    patched = model.copy()
    for rxn_id in bounds.index:
        try:
            target = patched.reactions.get_by_id(rxn_id)
        except KeyError:
            continue  # bound entry for a reaction the model does not contain
        target.lower_bound = bounds.loc[rxn_id, "lower_bound"]
        target.upper_bound = bounds.loc[rxn_id, "upper_bound"]
    return patched
|
|
365
|
|
def process_ras_cell(cellName, ras_row, model, rxns_ids, output_folder, save_models=False, save_models_path='saved_models/', save_models_format='csv'):
    """
    Apply one RAS row to the model bounds and write the result to CSV.

    Args:
        cellName (str): Name of the RAS cell; used to name the output file.
        ras_row (pd.Series): Scaling factors for this cell's reaction bounds.
        model (cobra.Model): The metabolic model whose bounds are read.
        rxns_ids (list of str): Reaction ids, used as index of the bounds table.
        output_folder (str): Folder that receives the "<cellName>.csv" file.
        save_models (bool): Whether to also save the model with applied bounds.
        save_models_path (str): Destination folder for saved models.
        save_models_format (str): Serialization format for saved models.

    Returns:
        None
    """
    current_bounds = pd.DataFrame(
        [(rxn.lower_bound, rxn.upper_bound) for rxn in model.reactions],
        index=rxns_ids,
        columns=["lower_bound", "upper_bound"],
    )
    scaled_bounds = apply_ras_bounds(current_bounds, ras_row)
    scaled_bounds.to_csv(output_folder + cellName + ".csv", sep='\t', index=True)

    if save_models:
        # Persist a copy of the model with this cell's scaled bounds applied.
        modified = apply_bounds_to_model(model, scaled_bounds)
        save_model(modified, cellName, save_models_path, save_models_format)
|
|
393
|
414
|
def generate_bounds_model(model: cobra.Model, ras=None, output_folder='output/', save_models=False, save_models_path='saved_models/', save_models_format='csv') -> None:
    """
    Generate reaction bounds for a metabolic model based on medium conditions and optional RAS adjustments.

    FVA is run first to tighten every reaction's bounds; the resulting bounds
    (optionally scaled per RAS cell) are written as CSV files to output_folder.

    Args:
        model (cobra.Model): The metabolic model for which bounds will be generated.
        ras (pd.DataFrame, optional): RAS pandas dataframe. Defaults to None.
        output_folder (str, optional): Folder path where output CSV files will be saved. Defaults to 'output/'.
        save_models (bool): Whether to save models with applied bounds.
        save_models_path (str): Path where to save models.
        save_models_format (str): Format for saved models.

    Returns:
        None: results are written to disk; no DataFrame is returned (the
        docstring previously claimed a pd.DataFrame return, which the code
        never produced).
    """
    # Snapshot the reaction ids once; they index every bounds table below.
    rxns_ids = [rxn.id for rxn in model.reactions]

    # Perform Flux Variability Analysis (FVA) on this medium
    df_FVA = cobra.flux_analysis.flux_variability_analysis(model, fraction_of_optimum=0, processes=1).round(8)

    # Set FVA bounds
    for reaction in rxns_ids:
        model.reactions.get_by_id(reaction).lower_bound = float(df_FVA.loc[reaction, "minimum"])
        model.reactions.get_by_id(reaction).upper_bound = float(df_FVA.loc[reaction, "maximum"])

    if ras is not None:
        # One parallel job per RAS cell; each writes its own <cell>.csv
        # (and optionally its own saved model) via process_ras_cell.
        Parallel(n_jobs=cpu_count())(delayed(process_ras_cell)(
            cellName, ras_row, model, rxns_ids, output_folder,
            save_models, save_models_path, save_models_format
        ) for cellName, ras_row in ras.iterrows())
    else:
        # No RAS: write the FVA bounds unscaled (factor 1 for every reaction).
        bounds = pd.DataFrame([(rxn.lower_bound, rxn.upper_bound) for rxn in model.reactions], index=rxns_ids, columns=["lower_bound", "upper_bound"])
        newBounds = apply_ras_bounds(bounds, pd.Series([1]*len(rxns_ids), index=rxns_ids))
        newBounds.to_csv(output_folder + "bounds.csv", sep='\t', index=True)

        # Save model if requested
        if save_models:
            modified_model = apply_bounds_to_model(model, newBounds)
            save_model(modified_model, "model_with_bounds", save_models_path, save_models_format)

    pass
|
|
435
|
|
436 ############################# main ###########################################
|
|
def main(args: List[str] = None) -> None:
    """
    Initializes everything and sets the program in motion based on the frontend
    input arguments.

    Args:
        args: Optional CLI argument list; defaults to sys.argv when None.

    Returns:
        None
    """
    if not os.path.exists('ras_to_bounds'):
        os.makedirs('ras_to_bounds')

    global ARGS
    ARGS = process_args(args)

    ras_combined = None
    class_assignments = None
    if ARGS.ras_selector:  # was `== True`; plain truthiness is idiomatic
        ras_file_list = ARGS.input_ras.split(",")
        ras_file_names = ARGS.name.split(",")
        if len(ras_file_names) != len(set(ras_file_names)):
            error_message = "Duplicated file names in the uploaded RAS matrices."
            warning(error_message)
            raise ValueError(error_message)

        # Class name = uploaded file name without its extension.
        ras_class_names = [file_name.rsplit(".", 1)[0] for file_name in ras_file_names]

        ras_list = []
        class_assignments = pd.DataFrame(columns=["Patient_ID", "Class"])
        for ras_matrix, ras_class_name in zip(ras_file_list, ras_class_names):
            ras = read_dataset(ras_matrix, "ras dataset")
            ras.replace("None", None, inplace=True)
            ras.set_index("Reactions", drop=True, inplace=True)
            ras = ras.T
            ras = ras.astype(float)
            if len(ras_file_list) > 1:
                # Append class name to patient id (dataframe index) so ids
                # stay unique across multiple uploaded matrices.
                ras.index = [f"{idx}_{ras_class_name}" for idx in ras.index]
            else:
                ras.index = [f"{idx}" for idx in ras.index]
            ras_list.append(ras)
            for patient_id in ras.index:
                class_assignments.loc[class_assignments.shape[0]] = [patient_id, ras_class_name]

        # Concatenate all ras DataFrames into a single DataFrame
        ras_combined = pd.concat(ras_list, axis=0)
        # Normalize the RAS values by max RAS
        ras_combined = ras_combined.div(ras_combined.max(axis=0))
        ras_combined.dropna(axis=1, how='all', inplace=True)

    model = utils.build_cobra_model_from_csv(ARGS.model_upload)

    validation = utils.validate_model(model)

    print("\n=== VALIDAZIONE MODELLO ===")
    for key, value in validation.items():
        print(f"{key}: {value}")

    if ARGS.ras_selector:
        generate_bounds_model(model, ras=ras_combined, output_folder=ARGS.output_path,
                              save_models=ARGS.save_models, save_models_path=ARGS.save_models_path,
                              save_models_format=ARGS.save_models_format)
        class_assignments.to_csv(ARGS.cell_class, sep='\t', index=False)
    else:
        generate_bounds_model(model, output_folder=ARGS.output_path,
                              save_models=ARGS.save_models, save_models_path=ARGS.save_models_path,
                              save_models_format=ARGS.save_models_format)
|
|
504
|
|
505 ##############################################################################
|
|
# Script entry point: run the full CLI workflow when executed directly.
if __name__ == "__main__":
    main()