# HG changeset patch
# User francesco_lapi
# Date 1758119218 0
# Node ID 5b625d91bc7fdb77f703ddc642b8ccfe3fb58a91
# Parent a6e45049c1b93a6a1e14df159a38de774c8a5c09
Uploaded
diff -r a6e45049c1b9 -r 5b625d91bc7f COBRAxy/custom_data_generator_beta.py
--- a/COBRAxy/custom_data_generator_beta.py Fri Sep 12 17:28:45 2025 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,255 +0,0 @@
-"""
-Custom data generator for COBRA models.
-
-This script loads a COBRA model (built-in or custom), optionally applies
-medium and gene nomenclature settings, derives reaction-related metadata
-(GPR rules, formulas, bounds, objective coefficients, medium membership,
-and compartments for ENGRO2), and writes a tabular summary.
-"""
-
-import os
-import csv
-import cobra
-import argparse
-import pandas as pd
-import utils.general_utils as utils
-from typing import Optional, Tuple, List
-import utils.model_utils as modelUtils
-import logging
-
-ARGS : argparse.Namespace
-def process_args(args: List[str] = None) -> argparse.Namespace:
- """
- Parse command-line arguments for CustomDataGenerator.
- """
-
- parser = argparse.ArgumentParser(
- usage="%(prog)s [options]",
- description="Generate custom data from a given model"
- )
-
- parser.add_argument("--out_log", type=str, required=True,
- help="Output log file")
-
- parser.add_argument("--model", type=str,
- help="Built-in model identifier (e.g., ENGRO2, Recon, HMRcore)")
- parser.add_argument("--input", type=str,
- help="Custom model file (JSON or XML)")
- parser.add_argument("--name", type=str, required=True,
- help="Model name (default or custom)")
-
- parser.add_argument("--medium_selector", type=str, required=True,
- help="Medium selection option")
-
- parser.add_argument("--gene_format", type=str, default="Default",
- help="Gene nomenclature format: Default (original), ENSNG, HGNC_SYMBOL, HGNC_ID, ENTREZ")
-
- parser.add_argument("--out_tabular", type=str,
- help="Output file for the merged dataset (CSV or XLSX)")
-
- parser.add_argument("--tool_dir", type=str, default=os.path.dirname(__file__),
- help="Tool directory (passed from Galaxy as $__tool_directory__)")
-
-
- return parser.parse_args(args)
-
-################################- INPUT DATA LOADING -################################
-def load_custom_model(file_path :utils.FilePath, ext :Optional[utils.FileFormat] = None) -> cobra.Model:
- """
- Loads a custom model from a file, either in JSON, XML, MAT, or YML format.
-
- Args:
- file_path : The path to the file containing the custom model.
- ext : explicit file extension. Necessary for standard use in galaxy because of its weird behaviour.
-
- Raises:
- DataErr : if the file is in an invalid format or cannot be opened for whatever reason.
-
- Returns:
- cobra.Model : the model, if successfully opened.
- """
- ext = ext if ext else file_path.ext
- try:
- if ext is utils.FileFormat.XML:
- return cobra.io.read_sbml_model(file_path.show())
-
- if ext is utils.FileFormat.JSON:
- return cobra.io.load_json_model(file_path.show())
-
- if ext is utils.FileFormat.MAT:
- return cobra.io.load_matlab_model(file_path.show())
-
- if ext is utils.FileFormat.YML:
- return cobra.io.load_yaml_model(file_path.show())
-
- except Exception as e: raise utils.DataErr(file_path, e.__str__())
- raise utils.DataErr(
- file_path,
- f"Unrecognized format '{file_path.ext}'. Only JSON, XML, MAT, YML are supported."
- )
-
-
-###############################- FILE SAVING -################################
-def save_as_csv_filePath(data :dict, file_path :utils.FilePath, fieldNames :Tuple[str, str]) -> None:
- """
- Saves any dictionary-shaped data in a .csv file created at the given file_path as FilePath.
-
- Args:
- data : the data to be written to the file.
- file_path : the path to the .csv file.
- fieldNames : the names of the fields (columns) in the .csv file.
-
- Returns:
- None
- """
- with open(file_path.show(), 'w', newline='') as csvfile:
- writer = csv.DictWriter(csvfile, fieldnames = fieldNames, dialect="excel-tab")
- writer.writeheader()
-
- for key, value in data.items():
- writer.writerow({ fieldNames[0] : key, fieldNames[1] : value })
-
-def save_as_csv(data :dict, file_path :str, fieldNames :Tuple[str, str]) -> None:
- """
- Saves any dictionary-shaped data in a .csv file created at the given file_path as string.
-
- Args:
- data : the data to be written to the file.
- file_path : the path to the .csv file.
- fieldNames : the names of the fields (columns) in the .csv file.
-
- Returns:
- None
- """
- with open(file_path, 'w', newline='') as csvfile:
- writer = csv.DictWriter(csvfile, fieldnames = fieldNames, dialect="excel-tab")
- writer.writeheader()
-
- for key, value in data.items():
- writer.writerow({ fieldNames[0] : key, fieldNames[1] : value })
-
-def save_as_tabular_df(df: pd.DataFrame, path: str) -> None:
- """
- Save a pandas DataFrame as a tab-separated file, creating directories as needed.
-
- Args:
- df: The DataFrame to write.
- path: Destination file path (will be written as TSV).
-
- Raises:
- DataErr: If writing the output fails for any reason.
-
- Returns:
- None
- """
- try:
- os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
- df.to_csv(path, sep="\t", index=False)
- except Exception as e:
- raise utils.DataErr(path, f"failed writing tabular output: {e}")
-
-
-###############################- ENTRY POINT -################################
-def main(args:List[str] = None) -> None:
- """
- Initialize and generate custom data based on the frontend input arguments.
-
- Returns:
- None
- """
- # Parse args from frontend (Galaxy XML)
- global ARGS
- ARGS = process_args(args)
-
-
- if ARGS.input:
- # Load a custom model from file
- model = load_custom_model(
- utils.FilePath.fromStrPath(ARGS.input), utils.FilePath.fromStrPath(ARGS.name).ext)
- else:
- # Load a built-in model
-
- try:
- model_enum = utils.Model[ARGS.model] # e.g., Model['ENGRO2']
- except KeyError:
- raise utils.ArgsErr("model", "one of Recon/ENGRO2/HMRcore/Custom_model", ARGS.model)
-
- # Load built-in model (Model.getCOBRAmodel uses tool_dir to locate local models)
- try:
- model = model_enum.getCOBRAmodel(toolDir=ARGS.tool_dir)
- except Exception as e:
- # Wrap/normalize load errors as DataErr for consistency
- raise utils.DataErr(ARGS.model, f"failed loading built-in model: {e}")
-
- # Determine final model name: explicit --name overrides, otherwise use the model id
-
- model_name = ARGS.name if ARGS.name else ARGS.model
-
- if ARGS.name == "ENGRO2" and ARGS.medium_selector != "Default":
- df_mediums = pd.read_csv(ARGS.tool_dir + "/local/medium/medium.csv", index_col = 0)
- ARGS.medium_selector = ARGS.medium_selector.replace("_", " ")
- medium = df_mediums[[ARGS.medium_selector]]
- medium = medium[ARGS.medium_selector].to_dict()
-
- # Reset all medium reactions lower bound to zero
- for rxn_id, _ in model.medium.items():
- model.reactions.get_by_id(rxn_id).lower_bound = float(0.0)
-
- # Apply selected medium uptake bounds (negative for uptake)
- for reaction, value in medium.items():
- if value is not None:
- model.reactions.get_by_id(reaction).lower_bound = -float(value)
-
- if (ARGS.name == "Recon" or ARGS.name == "ENGRO2") and ARGS.gene_format != "Default":
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
-
- model = modelUtils.translate_model_genes(
- model=model,
- mapping_df= pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
- target_nomenclature=ARGS.gene_format,
- source_nomenclature='HGNC_symbol',
- logger=logger
- )
-
- # generate data
- rules = modelUtils.generate_rules(model, asParsed = False)
- reactions = modelUtils.generate_reactions(model, asParsed = False)
- bounds = modelUtils.generate_bounds(model)
- medium = modelUtils.get_medium(model)
- objective_function = modelUtils.extract_objective_coefficients(model)
-
- if ARGS.name == "ENGRO2":
- compartments = modelUtils.generate_compartments(model)
-
- df_rules = pd.DataFrame(list(rules.items()), columns = ["ReactionID", "GPR"])
- df_reactions = pd.DataFrame(list(reactions.items()), columns = ["ReactionID", "Formula"])
-
- df_bounds = bounds.reset_index().rename(columns = {"index": "ReactionID"})
- df_medium = medium.rename(columns = {"reaction": "ReactionID"})
- df_medium["InMedium"] = True
-
- merged = df_reactions.merge(df_rules, on = "ReactionID", how = "outer")
- merged = merged.merge(df_bounds, on = "ReactionID", how = "outer")
- merged = merged.merge(objective_function, on = "ReactionID", how = "outer")
- if ARGS.name == "ENGRO2":
- merged = merged.merge(compartments, on = "ReactionID", how = "outer")
- merged = merged.merge(df_medium, on = "ReactionID", how = "left")
-
- merged["InMedium"] = merged["InMedium"].fillna(False)
-
- merged = merged.sort_values(by = "InMedium", ascending = False)
-
- if not ARGS.out_tabular:
- raise utils.ArgsErr("out_tabular", "output path (--out_tabular) is required when output_format == tabular", ARGS.out_tabular)
- save_as_tabular_df(merged, ARGS.out_tabular)
- expected = ARGS.out_tabular
-
- # verify output exists and non-empty
- if not expected or not os.path.exists(expected) or os.path.getsize(expected) == 0:
- raise utils.DataErr(expected, "Output not created or empty")
-
- print("CustomDataGenerator: completed successfully")
-
-if __name__ == '__main__':
- main()
\ No newline at end of file
diff -r a6e45049c1b9 -r 5b625d91bc7f COBRAxy/custom_data_generator_beta.xml
--- a/COBRAxy/custom_data_generator_beta.xml Fri Sep 12 17:28:45 2025 +0000
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,118 +0,0 @@
[Deleted Galaxy tool wrapper XML (118 lines); the markup was not preserved in this extract. Recoverable content: requirements numpy, pandas, cobra, lxml; imported macro marea_macros.xml.]
\ No newline at end of file
diff -r a6e45049c1b9 -r 5b625d91bc7f COBRAxy/flux_simulation_beta.xml
--- a/COBRAxy/flux_simulation_beta.xml Fri Sep 12 17:28:45 2025 +0000
+++ b/COBRAxy/flux_simulation_beta.xml Wed Sep 17 14:26:58 2025 +0000
@@ -57,14 +57,14 @@
[Changed XML parameter lines in this hunk were not preserved in this extract; nothing recoverable.]
@@ -131,10 +131,12 @@
What it does
-------------
-This tool generates flux samples starting from metabolic models using CBS (Corner-based sampling) or OPTGP (Improved Artificial Centering Hit-and-Run sampler) algorithms.
+This tool generates flux distributions for each sample using either:
+1. a sampling-based strategy: the CBS (Corner-based sampling) or OPTGP (Improved Artificial Centering Hit-and-Run sampler) algorithms, or
+2. an optimization-based strategy: parsimonious-FBA (optimizing the Biomass objective), FVA (with a configurable optimality percentage), or Biomass sensitivity analysis (single-reaction knock-out).
Two upload modes are supported:
-1. **Model + bounds**: Upload one base model and multiple bound files (one per context/cell type)
+1. **Model + bounds**: Upload one base model (tabular file) and multiple bound files (one per context/cell type)
2. **Multiple complete models**: Upload multiple complete model files, each with integrated bounds
It can return sampled fluxes by applying summary statistics:
@@ -142,11 +144,6 @@
- median
- quantiles (0.25, 0.50, 0.75)
-Flux analysis can be performed over the metabolic model:
- - parsimonious-FBA (optimized by Biomass)
- - FVA (with configurable optimality percentage)
- - Biomass sensitivity analysis (single reaction knock-out)
-
Output:
-------------
@@ -160,4 +157,6 @@
]]>
-
\ No newline at end of file
+
+
+
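For orientation, the summary statistics listed in the help above can be reproduced on a sampled flux table with pandas; a minimal sketch (the file name and layout are assumptions, not the tool's actual output schema):

    import pandas as pd

    # hypothetical sampling output: one row per flux sample, one column per reaction
    fluxes = pd.read_csv("fluxes_sample1.tsv", sep="\t", index_col=0)

    summary = pd.DataFrame({
        "mean":   fluxes.mean(),
        "median": fluxes.median(),
        "q25":    fluxes.quantile(0.25),
        "q50":    fluxes.quantile(0.50),
        "q75":    fluxes.quantile(0.75),
    })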
diff -r a6e45049c1b9 -r 5b625d91bc7f COBRAxy/flux_to_map.xml
--- a/COBRAxy/flux_to_map.xml Fri Sep 12 17:28:45 2025 +0000
+++ b/COBRAxy/flux_to_map.xml Wed Sep 17 14:26:58 2025 +0000
@@ -87,7 +87,7 @@
[Single-line XML change not preserved in this extract; nothing recoverable.]
@@ -254,4 +254,5 @@
]]>
-
\ No newline at end of file
+
+
diff -r a6e45049c1b9 -r 5b625d91bc7f COBRAxy/metabolic_model_setting.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/metabolic_model_setting.py Wed Sep 17 14:26:58 2025 +0000
@@ -0,0 +1,256 @@
+"""
+Script to generate a tabular file describing a metabolic model (built-in or custom).
+
+This script loads a COBRA model (built-in or custom), optionally applies
+medium and gene nomenclature settings, derives reaction-related metadata
+(GPR rules, formulas, bounds, objective coefficients, medium membership,
+and compartments for ENGRO2), and writes a tabular summary.
+"""
+
+import os
+import csv
+import cobra
+import argparse
+import pandas as pd
+import utils.general_utils as utils
+from typing import Optional, Tuple, List
+import utils.model_utils as modelUtils
+import logging
+
+ARGS : argparse.Namespace
+def process_args(args: List[str] = None) -> argparse.Namespace:
+ """
+ Parse command-line arguments for metabolic_model_setting.
+ """
+
+ parser = argparse.ArgumentParser(
+ usage="%(prog)s [options]",
+ description="Generate custom data from a given model"
+ )
+
+ parser.add_argument("--out_log", type=str, required=True,
+ help="Output log file")
+
+ parser.add_argument("--model", type=str,
+ help="Built-in model identifier (e.g., ENGRO2, Recon, HMRcore)")
+ parser.add_argument("--input", type=str,
+ help="Custom model file (JSON or XML)")
+ parser.add_argument("--name", type=str, required=True,
+ help="Model name (default or custom)")
+
+ parser.add_argument("--medium_selector", type=str, required=True,
+ help="Medium selection option")
+
+ parser.add_argument("--gene_format", type=str, default="Default",
+ help="Gene nomenclature format: Default (original), ENSNG, HGNC_SYMBOL, HGNC_ID, ENTREZ")
+
+ parser.add_argument("--out_tabular", type=str,
+ help="Output file for the merged dataset (CSV or XLSX)")
+
+ parser.add_argument("--tool_dir", type=str, default=os.path.dirname(__file__),
+ help="Tool directory (passed from Galaxy as $__tool_directory__)")
+
+
+ return parser.parse_args(args)
+
+################################- INPUT DATA LOADING -################################
+def load_custom_model(file_path :utils.FilePath, ext :Optional[utils.FileFormat] = None) -> cobra.Model:
+ """
+ Loads a custom model from a file, either in JSON, XML, MAT, or YML format.
+
+ Args:
+ file_path : The path to the file containing the custom model.
+ ext : explicit file format. Needed when running under Galaxy, which renames uploaded datasets so the format cannot be inferred from the file name.
+
+ Raises:
+ DataErr : if the file is in an invalid format or cannot be opened for whatever reason.
+
+ Returns:
+ cobra.Model : the model, if successfully opened.
+ """
+ ext = ext if ext else file_path.ext
+ try:
+ if ext is utils.FileFormat.XML:
+ return cobra.io.read_sbml_model(file_path.show())
+
+ if ext is utils.FileFormat.JSON:
+ return cobra.io.load_json_model(file_path.show())
+
+ if ext is utils.FileFormat.MAT:
+ return cobra.io.load_matlab_model(file_path.show())
+
+ if ext is utils.FileFormat.YML:
+ return cobra.io.load_yaml_model(file_path.show())
+
+ except Exception as e: raise utils.DataErr(file_path, e.__str__())
+ raise utils.DataErr(
+ file_path,
+ f"Unrecognized format '{file_path.ext}'. Only JSON, XML, MAT, YML are supported."
+ )
+
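A minimal usage sketch for the loader above (the path is hypothetical; the format is passed explicitly because Galaxy renames uploaded datasets, hiding the original extension):

    from utils.general_utils import FilePath, FileFormat

    model = load_custom_model(FilePath.fromStrPath("models/my_model.xml"), FileFormat.XML)
    print(len(model.reactions), "reactions loaded")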
+
+###############################- FILE SAVING -################################
+def save_as_csv_filePath(data :dict, file_path :utils.FilePath, fieldNames :Tuple[str, str]) -> None:
+ """
+ Saves dictionary-shaped data to a tab-separated file at the given file_path (a FilePath object).
+
+ Args:
+ data : the data to be written to the file.
+ file_path : the path to the .csv file.
+ fieldNames : the names of the fields (columns) in the .csv file.
+
+ Returns:
+ None
+ """
+ with open(file_path.show(), 'w', newline='') as csvfile:
+ writer = csv.DictWriter(csvfile, fieldnames = fieldNames, dialect="excel-tab")
+ writer.writeheader()
+
+ for key, value in data.items():
+ writer.writerow({ fieldNames[0] : key, fieldNames[1] : value })
+
+def save_as_csv(data :dict, file_path :str, fieldNames :Tuple[str, str]) -> None:
+ """
+ Saves dictionary-shaped data to a tab-separated file at the given file_path (a plain string path).
+
+ Args:
+ data : the data to be written to the file.
+ file_path : the path to the .csv file.
+ fieldNames : the names of the fields (columns) in the .csv file.
+
+ Returns:
+ None
+ """
+ with open(file_path, 'w', newline='') as csvfile:
+ writer = csv.DictWriter(csvfile, fieldnames = fieldNames, dialect="excel-tab")
+ writer.writeheader()
+
+ for key, value in data.items():
+ writer.writerow({ fieldNames[0] : key, fieldNames[1] : value })
+
+def save_as_tabular_df(df: pd.DataFrame, path: str) -> None:
+ """
+ Save a pandas DataFrame as a tab-separated file, creating directories as needed.
+
+ Args:
+ df: The DataFrame to write.
+ path: Destination file path (will be written as TSV).
+
+ Raises:
+ DataErr: If writing the output fails for any reason.
+
+ Returns:
+ None
+ """
+ try:
+ os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
+ df.to_csv(path, sep="\t", index=False)
+ except Exception as e:
+ raise utils.DataErr(path, f"failed writing tabular output: {e}")
+
+
+###############################- ENTRY POINT -################################
+def main(args:List[str] = None) -> None:
+ """
+ Initialize and generate custom data based on the frontend input arguments.
+
+ Returns:
+ None
+ """
+ # Parse args from frontend (Galaxy XML)
+ global ARGS
+ ARGS = process_args(args)
+
+
+ if ARGS.input:
+ # Load a custom model from file
+ model = load_custom_model(
+ utils.FilePath.fromStrPath(ARGS.input), utils.FilePath.fromStrPath(ARGS.name).ext)
+ else:
+ # Load a built-in model
+
+ try:
+ model_enum = utils.Model[ARGS.model] # e.g., Model['ENGRO2']
+ except KeyError:
+ raise utils.ArgsErr("model", "one of Recon/ENGRO2/HMRcore/Custom_model", ARGS.model)
+
+ # Load built-in model (Model.getCOBRAmodel uses tool_dir to locate local models)
+ try:
+ model = model_enum.getCOBRAmodel(toolDir=ARGS.tool_dir)
+ except Exception as e:
+ # Wrap/normalize load errors as DataErr for consistency
+ raise utils.DataErr(ARGS.model, f"failed loading built-in model: {e}")
+
+ # Determine final model name: explicit --name overrides, otherwise use the model id
+
+ model_name = ARGS.name if ARGS.name else ARGS.model
+
+ if ARGS.name == "ENGRO2" and ARGS.medium_selector != "Default":
+ df_mediums = pd.read_csv(ARGS.tool_dir + "/local/medium/medium.csv", index_col = 0)
+ ARGS.medium_selector = ARGS.medium_selector.replace("_", " ")
+ medium = df_mediums[[ARGS.medium_selector]]
+ medium = medium[ARGS.medium_selector].to_dict()
+
+ # Reset all medium reactions lower bound to zero
+ for rxn_id, _ in model.medium.items():
+ model.reactions.get_by_id(rxn_id).lower_bound = float(0.0)
+
+ # Apply selected medium uptake bounds (negative for uptake)
+ for reaction, value in medium.items():
+ if value is not None:
+ model.reactions.get_by_id(reaction).lower_bound = -float(value)
+
+ if (ARGS.name == "Recon" or ARGS.name == "ENGRO2") and ARGS.gene_format != "Default":
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ model = modelUtils.translate_model_genes(
+ model=model,
+ mapping_df= pd.read_csv(ARGS.tool_dir + "/local/mappings/genes_human.csv", dtype={'entrez_id': str}),
+ target_nomenclature=ARGS.gene_format,
+ source_nomenclature='HGNC_symbol',
+ logger=logger
+ )
+
+ # generate data
+ rules = modelUtils.generate_rules(model, asParsed = False)
+ reactions = modelUtils.generate_reactions(model, asParsed = False)
+ bounds = modelUtils.generate_bounds(model)
+ medium = modelUtils.get_medium(model)
+ objective_function = modelUtils.extract_objective_coefficients(model)
+
+ if ARGS.name == "ENGRO2":
+ compartments = modelUtils.generate_compartments(model)
+
+ df_rules = pd.DataFrame(list(rules.items()), columns = ["ReactionID", "GPR"])
+ df_reactions = pd.DataFrame(list(reactions.items()), columns = ["ReactionID", "Formula"])
+
+ df_bounds = bounds.reset_index().rename(columns = {"index": "ReactionID"})
+ df_medium = medium.rename(columns = {"reaction": "ReactionID"})
+ df_medium["InMedium"] = True
+
+ merged = df_reactions.merge(df_rules, on = "ReactionID", how = "outer")
+ merged = merged.merge(df_bounds, on = "ReactionID", how = "outer")
+ merged = merged.merge(objective_function, on = "ReactionID", how = "outer")
+ if ARGS.name == "ENGRO2":
+ merged = merged.merge(compartments, on = "ReactionID", how = "outer")
+ merged = merged.merge(df_medium, on = "ReactionID", how = "left")
+
+ merged["InMedium"] = merged["InMedium"].fillna(False)
+
+ merged = merged.sort_values(by = "InMedium", ascending = False)
+
+ if not ARGS.out_tabular:
+ raise utils.ArgsErr("out_tabular", "output path (--out_tabular) is required when output_format == tabular", ARGS.out_tabular)
+ save_as_tabular_df(merged, ARGS.out_tabular)
+ expected = ARGS.out_tabular
+
+ # verify output exists and non-empty
+ if not expected or not os.path.exists(expected) or os.path.getsize(expected) == 0:
+ raise utils.DataErr(expected, "Output not created or empty")
+
+ print("Metabolic_model_setting: completed successfully")
+
+if __name__ == '__main__':
+
+ main()
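The entry point accepts an explicit argument list, so the script can also be driven outside Galaxy; a minimal invocation sketch (paths and values are illustrative, and assume the COBRAxy directory is on the Python path):

    import metabolic_model_setting as mms

    mms.main([
        "--model", "ENGRO2",
        "--name", "ENGRO2",
        "--medium_selector", "Default",
        "--gene_format", "Default",
        "--out_log", "setup.log",
        "--out_tabular", "ENGRO2_model.tsv",
    ])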
diff -r a6e45049c1b9 -r 5b625d91bc7f COBRAxy/metabolic_model_setting.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/COBRAxy/metabolic_model_setting.xml Wed Sep 17 14:26:58 2025 +0000
@@ -0,0 +1,116 @@
[Added Galaxy tool wrapper XML (116 lines); the markup was not preserved in this extract. Recoverable content: requirements numpy, pandas, cobra, lxml; imported macro marea_macros.xml.]
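For orientation, a generic sketch of how a Galaxy tool wrapper typically declares the requirements and macro import recoverable above (indicative only, not the original markup; versions omitted):

    <requirements>
        <requirement type="package">numpy</requirement>
        <requirement type="package">pandas</requirement>
        <requirement type="package">cobra</requirement>
        <requirement type="package">lxml</requirement>
    </requirements>
    <macros>
        <import>marea_macros.xml</import>
    </macros>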
diff -r a6e45049c1b9 -r 5b625d91bc7f COBRAxy/ras_to_bounds_beta.xml
--- a/COBRAxy/ras_to_bounds_beta.xml Fri Sep 12 17:28:45 2025 +0000
+++ b/COBRAxy/ras_to_bounds_beta.xml Wed Sep 17 14:26:58 2025 +0000
@@ -64,28 +64,23 @@
What it does
-------------
-This tool generates the reactions bounds for a given metabolic model (JSON or XML format) both with and without the use of the Reaction Activity Scores (RAS) matrix generated by RAS generator.
-Moreover, it enables to use custom/pre-defined growth mediums to constrain exchange reactions. For a custom medium, It is suggested to use the template file returned by the Custom Data Generator tool.
-If the RAS matrix, generated by the RAS generator tool, is used, then a bounds file is generated for each cell. Otherwise, a single bounds file is returned.
-By default, all reactions in model.medium that are not present in the medium file have lower bound set to 0.0 and not set to the default model value.
+This tool generates reaction bounds for a given tabular model, created by the Metabolic Model Setting tool, using the Reaction Activity Scores (RAS) matrix generated by the RAS Generator.
+
Accepted files:
- - A model: JSON, XML, MAT or YAML (.yml) file reporting reactions and rules contained in the model. Supported compressed formats: .zip, .gz and .bz2. Filename must follow the pattern: {model_name}.{extension}.[zip|gz|bz2]
+ - A tabular model: tab-separated file describing the metabolic model (reactions, GPR rules, bounds, objective coefficients, medium membership), as produced by the Metabolic Model Setting tool.
 - RAS matrix: tab-separated RAS file as returned by the RAS generator. Multiple RAS files with different file names can be uploaded as well (e.g. one RAS matrix for normal cells and one for cancer cells). Note that if multiple RAS matrices are uploaded, the bounds are normalized across all cells.
- - Medium: tab-separated file containing lower and upper-bounds of medium reactions.
-
-Example of custum growth medium file:
-
-+------------+----------------+----------------+
-| Reaction ID| lower_bound | upper_bound |
-+============+================+================+
-| r1 | 0.123167 | 0.371355 |
-+------------+----------------+----------------+
-| r2 | 0.268765 | 0.765567 |
-+------------+----------------+----------------+
-
-
+Example of tabular model:
++-------------+---------+----------------+-------------+-------------+----------------------+------------------------+----------+
+| Reaction ID | Formula | GPR            | lower_bound | upper_bound | ObjectiveCoefficient | Pathways (one or more) | InMedium |
++=============+=========+================+=============+=============+======================+========================+==========+
+| r1          | a+b-->c | GeneA or GeneB | 0.123167    | 0.371355    | 0                    | Glycolysis             | True     |
++-------------+---------+----------------+-------------+-------------+----------------------+------------------------+----------+
+| r2          | d+e-->f | GeneC          | 0.268765    | 0.765567    | 1                    | Glycolysis             | False    |
++-------------+---------+----------------+-------------+-------------+----------------------+------------------------+----------+
+
+
Example for multiple RAS matrices:
- cancer.csv and normal.csv generated by RAS generator tool (the two class names are 'cancer' and 'normal').
- This tool returns one unique collection of bounds files for both cancer and normal cells (normalization is performed across all cells).
@@ -95,10 +90,11 @@
-------------
The tool generates:
- - bounds: reporting the bounds of the model, or cells if RAS is used. Format: tab-separated.
- - Classes: a file containing the class of each cell (only if multiple RAS matrices were uploaded). The class name of a RAS matrix corresponds to its file name. Format: tab-separated.
+ - A collection of tabular files, one per sample. Each file contains the lower and upper bounds, computed from the RAS values and FVA, that are used for flux sampling or optimization.
+ - Classes: a file containing the class of each sample. The class name of a RAS matrix corresponds to its file name. Format: tab-separated.
- a log file (.txt).
]]>
-
\ No newline at end of file
+
+
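As a quick check of the tabular model format shown above, the file can be loaded with pandas; a minimal sketch (the file name is illustrative; column names follow the generator code in metabolic_model_setting.py, which writes "ReactionID" rather than "Reaction ID"):

    import pandas as pd

    model_tab = pd.read_csv("ENGRO2_model.tsv", sep="\t")
    bounds = model_tab.set_index("ReactionID")[["lower_bound", "upper_bound"]]
    medium_rxns = model_tab.loc[model_tab["InMedium"], "ReactionID"].tolist()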