changeset 519:8f65bed8d4cc draft

Make the 'name' argument of write_to_file optional so callers can pass either a directory plus base name or a full output file path.
author luca_milaz
date Thu, 09 Oct 2025 10:08:20 +0000
parents c3c1d3b3941f
children 6f8d63f8f6b6
files COBRAxy/flux_simulation.py
diffstat 1 files changed, 14 insertions(+), 11 deletions(-)
--- a/COBRAxy/flux_simulation.py	Thu Oct 09 10:00:22 2025 +0000
+++ b/COBRAxy/flux_simulation.py	Thu Oct 09 10:08:20 2025 +0000
@@ -166,13 +166,13 @@
     print(s)
 
 
-def write_to_file(dataset: pd.DataFrame, name: str, path: str, keep_index:bool=False)->None:
+def write_to_file(dataset: pd.DataFrame, path: str, keep_index:bool=False, name:str=None)->None:
     """
     Write a DataFrame to a TSV file under path with a given base name.
 
     Args:
         dataset: The DataFrame to write.
-        name: Base file name (without extension).
+        name: Base file name (without extension). If None, 'path' is treated as the full file path.
         path: Directory path where the file will be saved.
         keep_index: Whether to keep the DataFrame index in the file.
 
@@ -180,7 +180,10 @@
         None
     """
     dataset.index.name = 'Reactions'
-    dataset.to_csv(os.path.join(path, name + ".csv"), sep = '\t', index = keep_index)
+    if name:
+        dataset.to_csv(os.path.join(path, name + ".csv"), sep = '\t', index = keep_index)
+    else:
+        dataset.to_csv(path, sep = '\t', index = keep_index)
 
 ############################ dataset input ####################################
 def read_dataset(data :str, name :str) -> pd.DataFrame:
@@ -254,7 +257,7 @@
     samplesTotal = pd.DataFrame(samplesTotal_array, columns=reaction_ids)
     
     # Save the final merged result as CSV
-    write_to_file(samplesTotal.T, model_name, ARGS.output_path, True)
+    write_to_file(samplesTotal.T, ARGS.output_path, True, name=model_name)
     
     # Clean up temporary numpy files
     for i in range(n_batches):
@@ -331,7 +334,7 @@
     samplesTotal = pd.DataFrame(samplesTotal_array, columns=reaction_ids)
     
     # Save the final merged result as CSV
-    write_to_file(samplesTotal.T, model_name,  ARGS.output_path, True)
+    write_to_file(samplesTotal.T, ARGS.output_path, True, name=model_name)
     
     # Clean up temporary numpy files
     for i in range(n_batches):
@@ -591,17 +594,17 @@
         if "mean" in ARGS.output_types:
             all_mean = all_mean.fillna(0.0)
             all_mean = all_mean.sort_index()
-            write_to_file(all_mean.T, "mean", ARGS.out_mean, True)
+            write_to_file(all_mean.T, ARGS.out_mean, True)
 
         if "median" in ARGS.output_types:
             all_median = all_median.fillna(0.0)
             all_median = all_median.sort_index()
-            write_to_file(all_median.T, "median", ARGS.out_median, True)
+            write_to_file(all_median.T, ARGS.out_median, True)
         
         if "quantiles" in ARGS.output_types:
             all_quantiles = all_quantiles.fillna(0.0)
             all_quantiles = all_quantiles.sort_index()
-            write_to_file(all_quantiles.T, "quantiles", ARGS.out_quantiles, True)
+            write_to_file(all_quantiles.T, ARGS.out_quantiles, True)
     else:
         print("=== SAMPLING SKIPPED (n_samples = 0 or sampling disabled) ===")
 
@@ -616,19 +619,19 @@
     if "pFBA" in ARGS.output_type_analysis:
         all_pFBA = pd.concat([result[index_result] for result in results], ignore_index=False)
         all_pFBA = all_pFBA.sort_index()
-        write_to_file(all_pFBA.T, "pFBA", ARGS.out_pfba, True)
+        write_to_file(all_pFBA.T, ARGS.out_pfba, True)
         index_result += 1
         
     if "FVA" in ARGS.output_type_analysis:
         all_FVA = pd.concat([result[index_result] for result in results], ignore_index=False)
         all_FVA = all_FVA.sort_index()
-        write_to_file(all_FVA.T, "FVA", ARGS.out_fva, True)
+        write_to_file(all_FVA.T, ARGS.out_fva, True)
         index_result += 1
         
     if "sensitivity" in ARGS.output_type_analysis:
         all_sensitivity = pd.concat([result[index_result] for result in results], ignore_index=False)
         all_sensitivity = all_sensitivity.sort_index()
-        write_to_file(all_sensitivity.T, "sensitivity", ARGS.out_sensitivity, True)
+        write_to_file(all_sensitivity.T, ARGS.out_sensitivity, True)
 
     return
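
For reference, a minimal usage sketch of the updated write_to_file signature, showing both calling styles introduced by this change. The function body is copied from the new version above; the DataFrame contents, directory, and file names are illustrative only.

    import os
    import pandas as pd

    def write_to_file(dataset: pd.DataFrame, path: str, keep_index: bool = False, name: str = None) -> None:
        """Write a DataFrame as TSV: to <path>/<name>.csv when name is given, else to 'path' as the full file path."""
        dataset.index.name = 'Reactions'
        if name:
            dataset.to_csv(os.path.join(path, name + ".csv"), sep='\t', index=keep_index)
        else:
            dataset.to_csv(path, sep='\t', index=keep_index)

    # Illustrative data: two reactions, one sample column.
    samples = pd.DataFrame({"sample_0": [1.2, 3.4]}, index=["R1", "R2"])
    os.makedirs("results", exist_ok=True)

    # Directory + base name (as in the merged-samples calls): writes results/model_samples.csv
    write_to_file(samples, "results", True, name="model_samples")

    # Full file path, no name (as in the mean/median/quantiles and pFBA/FVA/sensitivity calls):
    # writes the TSV directly to results/mean.tsv
    write_to_file(samples, "results/mean.tsv", True)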