changeset 411:6b015d3184ab draft
Uploaded
author   | francesco_lapi
date     | Mon, 08 Sep 2025 21:07:34 +0000
parents  | d660c5b03c14
children | bdf4630ac1eb
files    | COBRAxy/custom_data_generator_beta.py COBRAxy/flux_simulation_beta.py COBRAxy/flux_simulation_beta.xml COBRAxy/ras_to_bounds_beta.py COBRAxy/ras_to_bounds_beta.xml COBRAxy/utils/general_utils.py
diffstat | 6 files changed, 281 insertions(+), 161 deletions(-)
--- a/COBRAxy/custom_data_generator_beta.py Mon Sep 08 17:33:52 2025 +0000
+++ b/COBRAxy/custom_data_generator_beta.py Mon Sep 08 21:07:34 2025 +0000
@@ -72,125 +72,7 @@
         raise utils.DataErr(file_path, f"Formato \"{file_path.ext}\" non riconosciuto, sono supportati solo file JSON e XML")
 
-################################- DATA GENERATION -################################
-ReactionId = str
-def generate_rules(model: cobra.Model, *, asParsed = True) -> Union[Dict[ReactionId, rulesUtils.OpList], Dict[ReactionId, str]]:
-    """
-    Generates a dictionary mapping reaction ids to rules from the model.
-
-    Args:
-        model : the model to derive data from.
-        asParsed : if True parses the rules to an optimized runtime format, otherwise leaves them as strings.
-
-    Returns:
-        Dict[ReactionId, rulesUtils.OpList] : the generated dictionary of parsed rules.
-        Dict[ReactionId, str] : the generated dictionary of raw rules.
-    """
-    # Is the below approach convoluted? yes
-    # Ok but is it inefficient? probably
-    # Ok but at least I don't have to repeat the check at every rule (I'm clinically insane)
-    _ruleGetter = lambda reaction : reaction.gene_reaction_rule
-    ruleExtractor = (lambda reaction :
-        rulesUtils.parseRuleToNestedList(_ruleGetter(reaction))) if asParsed else _ruleGetter
-
-    return {
-        reaction.id : ruleExtractor(reaction)
-        for reaction in model.reactions
-        if reaction.gene_reaction_rule }
-
-def generate_reactions(model :cobra.Model, *, asParsed = True) -> Dict[ReactionId, str]:
-    """
-    Generates a dictionary mapping reaction ids to reaction formulas from the model.
-
-    Args:
-        model : the model to derive data from.
-        asParsed : if True parses the reactions to an optimized runtime format, otherwise leaves them as they are.
-
-    Returns:
-        Dict[ReactionId, str] : the generated dictionary.
-    """
-
-    unparsedReactions = {
-        reaction.id : reaction.reaction
-        for reaction in model.reactions
-        if reaction.reaction
-    }
-
-    if not asParsed: return unparsedReactions
-
-    return reactionUtils.create_reaction_dict(unparsedReactions)
-
-def get_medium(model:cobra.Model) -> pd.DataFrame:
-    trueMedium=[]
-    for r in model.reactions:
-        positiveCoeff=0
-        for m in r.metabolites:
-            if r.get_coefficient(m.id)>0:
-                positiveCoeff=1;
-        if (positiveCoeff==0 and r.lower_bound<0):
-            trueMedium.append(r.id)
-
-    df_medium = pd.DataFrame()
-    df_medium["reaction"] = trueMedium
-    return df_medium
-
-def generate_bounds(model:cobra.Model) -> pd.DataFrame:
-
-    rxns = []
-    for reaction in model.reactions:
-        rxns.append(reaction.id)
-
-    bounds = pd.DataFrame(columns = ["lower_bound", "upper_bound"], index=rxns)
-
-    for reaction in model.reactions:
-        bounds.loc[reaction.id] = [reaction.lower_bound, reaction.upper_bound]
-    return bounds
-
-
-
-def generate_compartments(model: cobra.Model) -> pd.DataFrame:
-    """
-    Generates a DataFrame containing compartment information for each reaction.
-    Creates columns for each compartment position (Compartment_1, Compartment_2, etc.)
-
-    Args:
-        model: the COBRA model to extract compartment data from.
-
-    Returns:
-        pd.DataFrame: DataFrame with ReactionID and compartment columns
-    """
-    pathway_data = []
-
-    # First pass: determine the maximum number of pathways any reaction has
-    max_pathways = 0
-    reaction_pathways = {}
-
-    for reaction in model.reactions:
-        # Get unique pathways from all metabolites in the reaction
-        if type(reaction.annotation['pathways']) == list:
-            reaction_pathways[reaction.id] = reaction.annotation['pathways']
-            max_pathways = max(max_pathways, len(reaction.annotation['pathways']))
-        else:
-            reaction_pathways[reaction.id] = [reaction.annotation['pathways']]
-
-    # Create column names for pathways
-    pathway_columns = [f"Pathway_{i+1}" for i in range(max_pathways)]
-
-    # Second pass: create the data
-    for reaction_id, pathways in reaction_pathways.items():
-        row = {"ReactionID": reaction_id}
-
-        # Fill pathway columns
-        for i in range(max_pathways):
-            col_name = pathway_columns[i]
-            if i < len(pathways):
-                row[col_name] = pathways[i]
-            else:
-                row[col_name] = None  # or "" if you prefer empty strings
-
-        pathway_data.append(row)
-
-    return pd.DataFrame(pathway_data)
 
 ###############################- FILE SAVING -################################
@@ -296,12 +178,12 @@
         model = utils.convert_genes(model, ARGS.gene_format.replace("HGNC_", "HGNC "))
 
     # generate data
-    rules = generate_rules(model, asParsed = False)
-    reactions = generate_reactions(model, asParsed = False)
-    bounds = generate_bounds(model)
-    medium = get_medium(model)
+    rules = utils.generate_rules(model, asParsed = False)
+    reactions = utils.generate_reactions(model, asParsed = False)
+    bounds = utils.generate_bounds(model)
+    medium = utils.get_medium(model)
     if ARGS.name == "ENGRO2":
-        compartments = generate_compartments(model)
+        compartments = utils.generate_compartments(model)
 
     df_rules = pd.DataFrame(list(rules.items()), columns = ["ReactionID", "Rule"])
     df_reactions = pd.DataFrame(list(reactions.items()), columns = ["ReactionID", "Reaction"])
@@ -324,10 +206,8 @@
 
     #merged.to_csv(out_file, sep = '\t', index = False)
 
-    ####
-
     if not ARGS.out_tabular:
         raise utils.ArgsErr("out_tabular", "output path (--out_tabular) is required when output_format == tabular", ARGS.out_tabular)
     save_as_tabular_df(merged, ARGS.out_tabular)
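The net effect of this change is that the data-generation helpers (rules, reactions, bounds, medium, compartments) now live in utils.general_utils and the generator script only assembles the tabular output. A minimal sketch of the new call sites, assuming the COBRAxy tool directories are on sys.path; the input path is illustrative, not a file shipped with the repo:

    import pandas as pd
    import utils.general_utils as utils  # COBRAxy/utils/general_utils.py

    # "ENGRO2_rules.csv" is a placeholder; any file accepted by
    # utils.build_cobra_model_from_csv works here.
    model = utils.build_cobra_model_from_csv("ENGRO2_rules.csv")

    rules = utils.generate_rules(model, asParsed=False)          # ReactionID -> GPR string
    reactions = utils.generate_reactions(model, asParsed=False)  # ReactionID -> reaction formula
    bounds = utils.generate_bounds(model)                        # lower/upper bound per reaction

    # Same merge pattern the script uses for its tabular output.
    df_rules = pd.DataFrame(list(rules.items()), columns=["ReactionID", "Rule"])
    df_bounds = bounds.reset_index().rename(columns={"index": "ReactionID"})
    merged = df_rules.merge(df_bounds, on="ReactionID", how="outer")
    print(merged.head())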
--- a/COBRAxy/flux_simulation_beta.py Mon Sep 08 17:33:52 2025 +0000
+++ b/COBRAxy/flux_simulation_beta.py Mon Sep 08 21:07:34 2025 +0000
@@ -9,6 +9,7 @@
 from joblib import Parallel, delayed, cpu_count
 from cobra.sampling import OptGPSampler
 import sys
+import utils.general_utils as utils
 
 ################################# process args ###############################
--- a/COBRAxy/flux_simulation_beta.xml Mon Sep 08 17:33:52 2025 +0000
+++ b/COBRAxy/flux_simulation_beta.xml Mon Sep 08 21:07:34 2025 +0000
@@ -42,7 +42,6 @@
         <param name="model_upload" argument="--model_upload" type="data" format="csv,tsv,tabular" label="Model rules file:" help="Upload a CSV/TSV file containing reaction rules generated by the Model Initialization tool." />
-
         <param name="inputs" argument="--inputs" multiple="true" type="data" format="tabular, csv, tsv" label="Bound(s):" />
--- a/COBRAxy/ras_to_bounds_beta.py Mon Sep 08 17:33:52 2025 +0000
+++ b/COBRAxy/ras_to_bounds_beta.py Mon Sep 08 21:07:34 2025 +0000
@@ -30,9 +30,6 @@
     parser.add_argument("-mo", "--model_upload", type = str,
         help = "path to input file with custom rules, if provided")
-
-    parser.add_argument("-meo", "--medium", type = str,
-        help = "path to input file with custom medium, if provided")
 
     parser.add_argument('-ol', '--out_log',
         help = "Output log")
@@ -65,6 +62,21 @@
         default='ras_to_bounds/',
         help = 'output path for maps')
 
+    parser.add_argument('-sm', '--save_models',
+        type=utils.Bool("save_models"),
+        default=False,
+        help = 'whether to save models with applied bounds')
+
+    parser.add_argument('-smp', '--save_models_path',
+        type = str,
+        default='saved_models/',
+        help = 'output path for saved models')
+
+    parser.add_argument('-smf', '--save_models_format',
+        type = str,
+        default='csv',
+        help = 'format for saved models (csv, xml, json, mat, yaml, tabular)')
+
     ARGS = parser.parse_args(args)
     return ARGS
 
@@ -80,8 +92,9 @@
     Returns:
         None
     """
-    with open(ARGS.out_log, 'a') as log:
-        log.write(s + "\n\n")
+    if ARGS.out_log:
+        with open(ARGS.out_log, 'a') as log:
+            log.write(s + "\n\n")
     print(s)
 
 ############################ dataset input ####################################
@@ -136,7 +149,99 @@
             new_bounds.loc[reaction, "upper_bound"] = valMax
     return new_bounds
 
-def process_ras_cell(cellName, ras_row, model, rxns_ids, output_folder):
+def save_model(model, filename, output_folder, file_format='csv'):
+    """
+    Save a COBRA model to file in the specified format.
+
+    Args:
+        model (cobra.Model): The model to save.
+        filename (str): Base filename (without extension).
+        output_folder (str): Output directory.
+        file_format (str): File format ('xml', 'json', 'mat', 'yaml', 'tabular', 'csv').
+
+    Returns:
+        None
+    """
+    if not os.path.exists(output_folder):
+        os.makedirs(output_folder)
+
+    try:
+        if file_format == 'tabular' or file_format == 'csv':
+            # Special handling for tabular format using utils functions
+            filepath = os.path.join(output_folder, f"{filename}.csv")
+
+            rules = utils.generate_rules(model, asParsed = False)
+            reactions = utils.generate_reactions(model, asParsed = False)
+            bounds = utils.generate_bounds(model)
+            medium = utils.get_medium(model)
+
+            try:
+                compartments = utils.generate_compartments(model)
+            except:
+                compartments = None
+
+            df_rules = pd.DataFrame(list(rules.items()), columns = ["ReactionID", "Rule"])
+            df_reactions = pd.DataFrame(list(reactions.items()), columns = ["ReactionID", "Reaction"])
+            df_bounds = bounds.reset_index().rename(columns = {"index": "ReactionID"})
+            df_medium = medium.rename(columns = {"reaction": "ReactionID"})
+            df_medium["InMedium"] = True # flag per indicare la presenza nel medium
+
+            merged = df_reactions.merge(df_rules, on = "ReactionID", how = "outer")
+            merged = merged.merge(df_bounds, on = "ReactionID", how = "outer")
+
+            # Add compartments only if they exist and model name is ENGRO2
+            if compartments is not None and hasattr(ARGS, 'name') and ARGS.name == "ENGRO2":
+                merged = merged.merge(compartments, on = "ReactionID", how = "outer")
+
+            merged = merged.merge(df_medium, on = "ReactionID", how = "left")
+            merged["InMedium"] = merged["InMedium"].fillna(False)
+            merged = merged.sort_values(by = "InMedium", ascending = False)
+
+            merged.to_csv(filepath, sep="\t", index=False)
+
+        else:
+            # Standard COBRA formats
+            filepath = os.path.join(output_folder, f"{filename}.{file_format}")
+
+            if file_format == 'xml':
+                cobra.io.write_sbml_model(model, filepath)
+            elif file_format == 'json':
+                cobra.io.save_json_model(model, filepath)
+            elif file_format == 'mat':
+                cobra.io.save_matlab_model(model, filepath)
+            elif file_format == 'yaml':
+                cobra.io.save_yaml_model(model, filepath)
+            else:
+                raise ValueError(f"Unsupported format: {file_format}")
+
+        print(f"Model saved: {filepath}")
+
+    except Exception as e:
+        warning(f"Error saving model {filename}: {str(e)}")
+
+def apply_bounds_to_model(model, bounds):
+    """
+    Apply bounds from a DataFrame to a COBRA model.
+
+    Args:
+        model (cobra.Model): The metabolic model to modify.
+        bounds (pd.DataFrame): DataFrame with reaction bounds.
+
+    Returns:
+        cobra.Model: Modified model with new bounds.
+    """
+    model_copy = model.copy()
+    for reaction_id in bounds.index:
+        try:
+            reaction = model_copy.reactions.get_by_id(reaction_id)
+            reaction.lower_bound = bounds.loc[reaction_id, "lower_bound"]
+            reaction.upper_bound = bounds.loc[reaction_id, "upper_bound"]
+        except KeyError:
+            # Reaction not found in model, skip
+            continue
+    return model_copy
+
+def process_ras_cell(cellName, ras_row, model, rxns_ids, output_folder, save_models=False, save_models_path='saved_models/', save_models_format='csv'):
     """
     Process a single RAS cell, apply bounds, and save the bounds to a CSV file.
 
@@ -146,6 +251,9 @@
         model (cobra.Model): The metabolic model to be modified.
         rxns_ids (list of str): List of reaction IDs to which the scaling factors will be applied.
         output_folder (str): Folder path where the output CSV file will be saved.
+        save_models (bool): Whether to save models with applied bounds.
+        save_models_path (str): Path where to save models.
+        save_models_format (str): Format for saved models.
 
     Returns:
         None
@@ -153,17 +261,25 @@
     bounds = pd.DataFrame([(rxn.lower_bound, rxn.upper_bound) for rxn in model.reactions], index=rxns_ids, columns=["lower_bound", "upper_bound"])
     new_bounds = apply_ras_bounds(bounds, ras_row)
     new_bounds.to_csv(output_folder + cellName + ".csv", sep='\t', index=True)
+
+    # Save model if requested
+    if save_models:
+        modified_model = apply_bounds_to_model(model, new_bounds)
+        save_model(modified_model, cellName, save_models_path, save_models_format)
+
     pass
 
-def generate_bounds(model: cobra.Model, ras=None, output_folder='output/') -> pd.DataFrame:
+def generate_bounds(model: cobra.Model, ras=None, output_folder='output/', save_models=False, save_models_path='saved_models/', save_models_format='csv') -> pd.DataFrame:
     """
     Generate reaction bounds for a metabolic model based on medium conditions and optional RAS adjustments.
 
     Args:
         model (cobra.Model): The metabolic model for which bounds will be generated.
-        medium (dict): A dictionary where keys are reaction IDs and values are the medium conditions.
         ras (pd.DataFrame, optional): RAS pandas dataframe. Defaults to None.
         output_folder (str, optional): Folder path where output CSV files will be saved. Defaults to 'output/'.
+        save_models (bool): Whether to save models with applied bounds.
+        save_models_path (str): Path where to save models.
+        save_models_format (str): Format for saved models.
 
     Returns:
         pd.DataFrame: DataFrame containing the bounds of reactions in the model.
@@ -179,11 +295,20 @@
         model.reactions.get_by_id(reaction).upper_bound = float(df_FVA.loc[reaction, "maximum"])
 
     if ras is not None:
-        Parallel(n_jobs=cpu_count())(delayed(process_ras_cell)(cellName, ras_row, model, rxns_ids, output_folder) for cellName, ras_row in ras.iterrows())
+        Parallel(n_jobs=cpu_count())(delayed(process_ras_cell)(
+            cellName, ras_row, model, rxns_ids, output_folder,
+            save_models, save_models_path, save_models_format
+        ) for cellName, ras_row in ras.iterrows())
     else:
         bounds = pd.DataFrame([(rxn.lower_bound, rxn.upper_bound) for rxn in model.reactions], index=rxns_ids, columns=["lower_bound", "upper_bound"])
         newBounds = apply_ras_bounds(bounds, pd.Series([1]*len(rxns_ids), index=rxns_ids))
        newBounds.to_csv(output_folder + "bounds.csv", sep='\t', index=True)
+
+        # Save model if requested
+        if save_models:
+            modified_model = apply_bounds_to_model(model, newBounds)
+            save_model(modified_model, "model_with_bounds", save_models_path, save_models_format)
+
     pass
 
############################# main ###########################################
@@ -197,7 +322,6 @@
     if not os.path.exists('ras_to_bounds'):
         os.makedirs('ras_to_bounds')
 
-
     global ARGS
     ARGS = process_args(args)
 
@@ -236,16 +360,6 @@
         ras_combined = ras_combined.div(ras_combined.max(axis=0))
         ras_combined.dropna(axis=1, how='all', inplace=True)
 
-
-
-    #model_type :utils.Model = ARGS.model_selector
-    #if model_type is utils.Model.Custom:
-    #    model = model_type.getCOBRAmodel(customPath = utils.FilePath.fromStrPath(ARGS.model), customExtension = utils.FilePath.fromStrPath(ARGS.model_name).ext)
-    #else:
-    #    model = model_type.getCOBRAmodel(toolDir=ARGS.tool_dir)
-
-    # TODO LOAD MODEL FROM UPLOAD
-
     model = utils.build_cobra_model_from_csv(ARGS.model_upload)
 
     validation = utils.validate_model(model)
@@ -254,22 +368,15 @@
     for key, value in validation.items():
         print(f"{key}: {value}")
 
-    #if(ARGS.medium_selector == "Custom"):
-    #    medium = read_dataset(ARGS.medium, "medium dataset")
-    #    medium.set_index(medium.columns[0], inplace=True)
-    #    medium = medium.astype(float)
-    #    medium = medium[medium.columns[0]].to_dict()
-    #else:
-    #    df_mediums = pd.read_csv(ARGS.tool_dir + "/local/medium/medium.csv", index_col = 0)
-    #    ARGS.medium_selector = ARGS.medium_selector.replace("_", " ")
-    #    medium = df_mediums[[ARGS.medium_selector]]
-    #    medium = medium[ARGS.medium_selector].to_dict()
-
     if(ARGS.ras_selector == True):
-        generate_bounds(model, ras = ras_combined, output_folder=ARGS.output_path)
-        class_assignments.to_csv(ARGS.cell_class, sep = '\t', index = False)
+        generate_bounds(model, ras=ras_combined, output_folder=ARGS.output_path,
+                        save_models=ARGS.save_models, save_models_path=ARGS.save_models_path,
+                        save_models_format=ARGS.save_models_format)
+        class_assignments.to_csv(ARGS.cell_class, sep='\t', index=False)
     else:
-        generate_bounds(model, output_folder=ARGS.output_path)
+        generate_bounds(model, output_folder=ARGS.output_path,
+                        save_models=ARGS.save_models, save_models_path=ARGS.save_models_path,
+                        save_models_format=ARGS.save_models_format)
 
     pass
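The two new helpers, apply_bounds_to_model and save_model, are self-contained, so the save path can be exercised outside the Galaxy tool. A minimal sketch, assuming both functions are imported from the script above; the model file, reaction id, and sample name are made up for illustration:

    import cobra
    import pandas as pd

    # Illustrative inputs; in the tool the model comes from the uploaded rules
    # file and the bounds come from apply_ras_bounds().
    toy_model = cobra.io.load_json_model("ENGRO2.json")  # hypothetical model file
    toy_bounds = pd.DataFrame(
        {"lower_bound": [0.0], "upper_bound": [500.0]},
        index=["PGI"],  # hypothetical reaction id present in the model
    )

    constrained = apply_bounds_to_model(toy_model, toy_bounds)  # copies the model, patches bounds
    save_model(constrained, "patient_A", "saved_models/", file_format="json")
    # -> saved_models/patient_A.json via cobra.io.save_json_model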
--- a/COBRAxy/ras_to_bounds_beta.xml Mon Sep 08 17:33:52 2025 +0000
+++ b/COBRAxy/ras_to_bounds_beta.xml Mon Sep 08 21:07:34 2025 +0000
@@ -26,6 +26,8 @@
                 #set $names = $names + $input_temp.element_identifier + ","
             #end for
         #end if
+        --save_models $save_models
+        --save_models_path saved_models/
        --name "$names"
        --out_log $log
        ]]>
@@ -45,6 +47,11 @@
             </when>
         </conditional>
 
+        <param name="save_models" argument="--save_models" type="select" label="Save models with applied bounds?">
+            <option value="False" selected="true">No</option>
+            <option value="True">Yes</option>
+        </param>
+
     </inputs>
 
     <outputs>
@@ -53,7 +60,10 @@
         <collection name="ras_to_bounds" type="list" label="Ras to Bounds">
             <discover_datasets name = "collection" pattern="__name_and_ext__" directory="ras_to_bounds"/>
         </collection>
-
+        <collection name="saved_models" type="list" label="Saved Models (Tabular Format)">
+            <filter>save_models == "True"</filter>
+            <discover_datasets name = "saved_models_collection" pattern="__name_and_ext__" directory="saved_models"/>
+        </collection>
     </outputs>
 
     <help>
--- a/COBRAxy/utils/general_utils.py Mon Sep 08 17:33:52 2025 +0000
+++ b/COBRAxy/utils/general_utils.py Mon Sep 08 21:07:34 2025 +0000
@@ -17,6 +17,8 @@
 import gzip
 import bz2
 from io import StringIO
+import rule_parsing as rulesUtils
+import reaction_parsing as reactionUtils
 
 
@@ -981,3 +983,124 @@
         validation['status'] = f"Error: {e}"
 
     return validation
+
+
+################################- DATA GENERATION -################################
+ReactionId = str
+def generate_rules(model: cobra.Model, *, asParsed = True) -> Union[Dict[ReactionId, rulesUtils.OpList], Dict[ReactionId, str]]:
+    """
+    Generates a dictionary mapping reaction ids to rules from the model.
+
+    Args:
+        model : the model to derive data from.
+        asParsed : if True parses the rules to an optimized runtime format, otherwise leaves them as strings.
+
+    Returns:
+        Dict[ReactionId, rulesUtils.OpList] : the generated dictionary of parsed rules.
+        Dict[ReactionId, str] : the generated dictionary of raw rules.
+    """
+    # Is the below approach convoluted? yes
+    # Ok but is it inefficient? probably
+    # Ok but at least I don't have to repeat the check at every rule (I'm clinically insane)
+    _ruleGetter = lambda reaction : reaction.gene_reaction_rule
+    ruleExtractor = (lambda reaction :
+        rulesUtils.parseRuleToNestedList(_ruleGetter(reaction))) if asParsed else _ruleGetter
+
+    return {
+        reaction.id : ruleExtractor(reaction)
+        for reaction in model.reactions
+        if reaction.gene_reaction_rule }
+
+def generate_reactions(model :cobra.Model, *, asParsed = True) -> Dict[ReactionId, str]:
+    """
+    Generates a dictionary mapping reaction ids to reaction formulas from the model.
+
+    Args:
+        model : the model to derive data from.
+        asParsed : if True parses the reactions to an optimized runtime format, otherwise leaves them as they are.
+
+    Returns:
+        Dict[ReactionId, str] : the generated dictionary.
+    """
+
+    unparsedReactions = {
+        reaction.id : reaction.reaction
+        for reaction in model.reactions
+        if reaction.reaction
+    }
+
+    if not asParsed: return unparsedReactions
+
+    return reactionUtils.create_reaction_dict(unparsedReactions)
+
+def get_medium(model:cobra.Model) -> pd.DataFrame:
+    trueMedium=[]
+    for r in model.reactions:
+        positiveCoeff=0
+        for m in r.metabolites:
+            if r.get_coefficient(m.id)>0:
+                positiveCoeff=1;
+        if (positiveCoeff==0 and r.lower_bound<0):
+            trueMedium.append(r.id)
+
+    df_medium = pd.DataFrame()
+    df_medium["reaction"] = trueMedium
+    return df_medium
+
+def generate_bounds(model:cobra.Model) -> pd.DataFrame:
+
+    rxns = []
+    for reaction in model.reactions:
+        rxns.append(reaction.id)
+
+    bounds = pd.DataFrame(columns = ["lower_bound", "upper_bound"], index=rxns)
+
+    for reaction in model.reactions:
+        bounds.loc[reaction.id] = [reaction.lower_bound, reaction.upper_bound]
+    return bounds
+
+
+
+def generate_compartments(model: cobra.Model) -> pd.DataFrame:
+    """
+    Generates a DataFrame containing compartment information for each reaction.
+    Creates columns for each compartment position (Compartment_1, Compartment_2, etc.)
+
+    Args:
+        model: the COBRA model to extract compartment data from.
+
+    Returns:
+        pd.DataFrame: DataFrame with ReactionID and compartment columns
+    """
+    pathway_data = []
+
+    # First pass: determine the maximum number of pathways any reaction has
+    max_pathways = 0
+    reaction_pathways = {}
+
+    for reaction in model.reactions:
+        # Get unique pathways from all metabolites in the reaction
+        if type(reaction.annotation['pathways']) == list:
+            reaction_pathways[reaction.id] = reaction.annotation['pathways']
+            max_pathways = max(max_pathways, len(reaction.annotation['pathways']))
+        else:
+            reaction_pathways[reaction.id] = [reaction.annotation['pathways']]
+
+    # Create column names for pathways
+    pathway_columns = [f"Pathway_{i+1}" for i in range(max_pathways)]
+
+    # Second pass: create the data
+    for reaction_id, pathways in reaction_pathways.items():
+        row = {"ReactionID": reaction_id}
+
+        # Fill pathway columns
+        for i in range(max_pathways):
+            col_name = pathway_columns[i]
+            if i < len(pathways):
+                row[col_name] = pathways[i]
+            else:
+                row[col_name] = None  # or "" if you prefer empty strings
+
+        pathway_data.append(row)
+
+    return pd.DataFrame(pathway_data)
\ No newline at end of file
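Since the helpers now ship with general_utils, they can be smoke-tested on a throwaway model without the Galaxy wrappers. A small sketch, assuming the COBRAxy tool directories are on sys.path; the reaction and metabolite ids are made up:

    import cobra
    import utils.general_utils as utils  # COBRAxy/utils/general_utils.py

    toy = cobra.Model("toy")
    ex = cobra.Reaction("EX_glc_e")                         # hypothetical exchange reaction
    ex.add_metabolites({cobra.Metabolite("glc_e"): -1.0})   # only negative coefficients
    ex.bounds = (-10.0, 1000.0)                             # open for uptake, so counted as medium
    toy.add_reactions([ex])

    print(utils.get_medium(toy))       # DataFrame with one row: EX_glc_e
    print(utils.generate_bounds(toy))  # lower/upper bounds indexed by reaction id
    print(utils.generate_rules(toy, asParsed=False))  # {} here, since the toy model has no GPR rules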