456
|
1 """
|
|
2 Utilities for generating and manipulating COBRA models and related metadata.
|
|
3
|
|
4 This module includes helpers to:
|
|
5 - extract rules, reactions, bounds, objective coefficients, and compartments
|
|
6 - build a COBRA model from a tabular file
|
|
7 - set objective and medium from dataframes
|
|
8 - validate a model and convert gene identifiers
|
|
9 - translate model GPRs using mapping tables
|
|
10 """
|
418
|
import logging
import os
import re
from collections import defaultdict
from typing import Any, Dict, List, Optional, Set, Tuple, Union

import cobra
import pandas as pd
from cobra import Model as cobraModel, Reaction, Metabolite

import utils.rule_parsing as rulesUtils
import utils.reaction_parsing as reactionUtils
|
418
|
21
|
|
22 ################################- DATA GENERATION -################################
|
|
23 ReactionId = str
|
419
|
def generate_rules(model: cobraModel, *, asParsed = True) -> Union[Dict[ReactionId, rulesUtils.OpList], Dict[ReactionId, str]]:
    """
    Generate a dictionary mapping reaction IDs to GPR rules from the model.

    Args:
        model: COBRA model to derive data from.
        asParsed: If True, parse rules into a nested list structure; otherwise keep raw strings.

    Returns:
        Dict[ReactionId, rulesUtils.OpList]: Parsed rules by reaction ID.
        Dict[ReactionId, str]: Raw rules by reaction ID.
    """
    rules: Dict[ReactionId, object] = {}
    for rxn in model.reactions:
        raw_rule = rxn.gene_reaction_rule
        # Reactions without a GPR are skipped entirely.
        if not raw_rule:
            continue
        rules[rxn.id] = rulesUtils.parseRuleToNestedList(raw_rule) if asParsed else raw_rule
    return rules
|
|
44
|
419
|
def generate_reactions(model :cobraModel, *, asParsed = True) -> Dict[ReactionId, str]:
    """
    Generate a dictionary mapping reaction IDs to reaction formulas from the model.

    Args:
        model: COBRA model to derive data from.
        asParsed: If True, convert formulas into a parsed representation; otherwise keep raw strings.

    Returns:
        Dict[ReactionId, str]: Reactions by reaction ID (parsed if requested).
    """
    raw_formulas = {}
    for rxn in model.reactions:
        formula = rxn.reaction
        # Skip reactions with an empty formula string.
        if formula:
            raw_formulas[rxn.id] = formula

    if asParsed:
        return reactionUtils.create_reaction_dict(raw_formulas)
    return raw_formulas
|
|
66
|
419
|
def get_medium(model:cobraModel) -> pd.DataFrame:
    """
    Extract the uptake reactions representing the model medium.

    Returns a DataFrame with a single column 'reaction' listing exchange reactions
    with negative lower bound and no positive stoichiometric coefficients (uptake only).
    """
    uptake_ids = []
    for rxn in model.reactions:
        # A reaction is uptake-only when no metabolite has a positive coefficient
        # and the lower bound permits inward flux.
        has_positive = any(rxn.get_coefficient(met.id) > 0 for met in rxn.metabolites)
        if not has_positive and rxn.lower_bound < 0:
            uptake_ids.append(rxn.id)

    result = pd.DataFrame()
    result["reaction"] = uptake_ids
    return result
|
|
86
|
426
|
def extract_objective_coefficients(model: cobraModel) -> pd.DataFrame:
    """
    Extract objective coefficients for each reaction.

    Args:
        model: COBRA model

    Returns:
        pd.DataFrame with columns: ReactionID, ObjectiveCoefficient
    """
    # model.objective.expression is a linear expression over solver variables;
    # reactions absent from it get a coefficient of 0.0.
    coeff_map = model.objective.expression.as_coefficients_dict()

    rows = [
        {
            "ReactionID": rxn.id,
            "ObjectiveCoefficient": coeff_map.get(rxn.forward_variable, 0.0),
        }
        for rxn in model.reactions
    ]
    return pd.DataFrame(rows)
|
|
109
|
419
|
def generate_bounds(model:cobraModel) -> pd.DataFrame:
    """
    Build a DataFrame of lower/upper bounds for all reactions.

    Returns:
        pd.DataFrame indexed by reaction IDs with columns ['lower_bound', 'upper_bound'].
    """
    rxn_ids = [rxn.id for rxn in model.reactions]

    bounds = pd.DataFrame(columns = ["lower_bound", "upper_bound"], index=rxn_ids)
    for rxn in model.reactions:
        bounds.loc[rxn.id] = [rxn.lower_bound, rxn.upper_bound]
    return bounds
|
|
127
|
|
128
|
|
129
|
419
|
def generate_compartments(model: cobraModel) -> pd.DataFrame:
    """
    Generate a DataFrame with pathway information for each reaction.

    Creates one column per pathway position (Pathway_1, Pathway_2, ...) based
    on each reaction's 'pathways' annotation, which may be a list or a scalar.

    Args:
        model: the COBRA model to extract pathway data from.

    Returns:
        pd.DataFrame: DataFrame with ReactionID and Pathway_N columns.

    Raises:
        KeyError: if a reaction has no 'pathways' annotation.
    """
    pathway_data = []

    # First pass: normalize every annotation to a list and find the maximum
    # number of pathways any reaction has (this fixes the column count).
    max_pathways = 0
    reaction_pathways = {}

    for reaction in model.reactions:
        annotation = reaction.annotation['pathways']
        pathways = annotation if isinstance(annotation, list) else [annotation]
        reaction_pathways[reaction.id] = pathways
        # BUG FIX: the original only updated max_pathways for list annotations,
        # so scalar-annotated pathways were silently dropped from the output
        # (max_pathways stayed 0 and no Pathway_N column was created for them).
        max_pathways = max(max_pathways, len(pathways))

    # Create column names for pathways
    pathway_columns = [f"Pathway_{i+1}" for i in range(max_pathways)]

    # Second pass: one row per reaction, padding missing positions with None.
    for reaction_id, pathways in reaction_pathways.items():
        row = {"ReactionID": reaction_id}
        for i, col_name in enumerate(pathway_columns):
            row[col_name] = pathways[i] if i < len(pathways) else None
        pathway_data.append(row)

    return pd.DataFrame(pathway_data)
|
|
173
|
|
174
|
|
175
|
|
def build_cobra_model_from_csv(csv_path: str, model_id: str = "new_model") -> cobraModel:
    """
    Build a COBRApy model from a tabular file with reaction data.

    The file is tab-separated and must provide at least the columns
    'ReactionID', 'Formula', 'lower_bound', 'upper_bound', 'GPR',
    'ObjectiveCoefficient' and 'InMedium' (the last two are consumed by
    set_objective_from_csv / set_medium_from_data).

    Args:
        csv_path: Path to the tab-separated file.
        model_id: ID for the newly created model.

    Returns:
        cobra.Model: The constructed COBRApy model.

    Raises:
        ValueError: if a row has a missing formula in the reaction-building pass,
            or if no reaction carries a non-zero objective coefficient.
    """

    df = pd.read_csv(csv_path, sep='\t')

    model = cobraModel(model_id)

    # Shared across both passes: metabolite/compartment objects keyed by ID.
    metabolites_dict = {}
    compartments_dict = {}

    print(f"Building model from {len(df)} reactions...")

    # First pass: discover every metabolite and compartment mentioned in any formula.
    for idx, row in df.iterrows():
        reaction_formula = str(row['Formula']).strip()
        # 'nan' appears when the Formula cell was empty (str(NaN) == 'nan').
        if not reaction_formula or reaction_formula == 'nan':
            continue

        metabolites = extract_metabolites_from_reaction(reaction_formula)

        for met_id in metabolites:
            compartment = extract_compartment_from_metabolite(met_id)

            if compartment not in compartments_dict:
                compartments_dict[compartment] = compartment

            if met_id not in metabolites_dict:
                # Display name: metabolite ID with the compartment suffix removed.
                metabolites_dict[met_id] = Metabolite(
                    id=met_id,
                    compartment=compartment,
                    name=met_id.replace(f"_{compartment}", "").replace("__", "_")
                )

    model.compartments = compartments_dict

    model.add_metabolites(list(metabolites_dict.values()))

    print(f"Added {len(metabolites_dict)} metabolites and {len(compartments_dict)} compartments")

    reactions_added = 0
    reactions_skipped = 0

    # Second pass: build Reaction objects row by row.
    for idx, row in df.iterrows():

        reaction_id = str(row['ReactionID']).strip()
        reaction_formula = str(row['Formula']).strip()

        # NOTE(review): the first pass silently skips missing formulas, but this
        # pass raises on them — confirm the asymmetry is intentional.
        if not reaction_formula or reaction_formula == 'nan':
            raise ValueError(f"Missing reaction formula for {reaction_id}")

        reaction = Reaction(reaction_id)
        reaction.name = reaction_id

        # Missing bounds default to the conventional -1000/1000 span.
        reaction.lower_bound = float(row['lower_bound']) if pd.notna(row['lower_bound']) else -1000.0
        reaction.upper_bound = float(row['upper_bound']) if pd.notna(row['upper_bound']) else 1000.0

        if pd.notna(row['GPR']) and str(row['GPR']).strip():
            reaction.gene_reaction_rule = str(row['GPR']).strip()

        # A formula that cannot be parsed skips the reaction instead of aborting the build.
        try:
            parse_reaction_formula(reaction, reaction_formula, metabolites_dict)
        except Exception as e:
            print(f"Error parsing reaction {reaction_id}: {e}")
            reactions_skipped += 1
            continue

        model.add_reactions([reaction])
        reactions_added += 1


    print(f"Added {reactions_added} reactions, skipped {reactions_skipped} reactions")

    # set objective function
    set_objective_from_csv(model, df, obj_col="ObjectiveCoefficient")

    set_medium_from_data(model, df)

    print(f"Model completed: {len(model.reactions)} reactions, {len(model.metabolites)} metabolites")

    return model
|
|
264
|
|
265
|
|
# Extracts every metabolite ID in a formula (handles numeric prefixes + underscores).
def extract_metabolites_from_reaction(reaction_formula: str) -> Set[str]:
    """
    Extract metabolite IDs from a reaction formula.

    Tokens are expected to end with _<compartment> (e.g. _c, _m, _e) and may
    be preceded by an optional numeric coefficient; leading digits and
    underscores inside the ID are allowed.
    """
    # Optional coefficient, then a token ending in _<lowercase letters>.
    token_re = r'(?:\d+(?:\.\d+)?\s+)?([A-Za-z0-9_]+_[a-z]+)'
    return set(re.findall(token_re, reaction_formula))
|
|
279
|
|
280
|
|
def extract_compartment_from_metabolite(metabolite_id: str) -> str:
    """Return the compartment suffix of a metabolite ID (defaults to cytoplasm 'c')."""
    # The compartment is whatever follows the last underscore; IDs without an
    # underscore are assumed cytoplasmic.
    _, sep, suffix = metabolite_id.rpartition('_')
    return suffix if sep else 'c'
|
|
286
|
|
287
|
|
def parse_reaction_formula(reaction: Reaction, formula: str, metabolites_dict: Dict[str, Metabolite]):
    """
    Parse a reaction formula and set metabolites with signed coefficients.

    Reactants are added with negative coefficients, products with positive ones.
    Metabolite IDs not present in metabolites_dict are silently skipped.

    Args:
        reaction: Reaction object to populate.
        formula: Formula using one of the arrows '<=>', '<--', '-->', '<-'.
        metabolites_dict: Known Metabolite objects keyed by ID.

    Raises:
        ValueError: if the formula contains no recognized arrow.
    """
    # NOTE: the original assigned a `reversible` flag per arrow but never used
    # it — reversibility is governed by the bounds set elsewhere. The dead
    # local has been removed. Order matters: '<--' before '<-' (substring).
    for arrow in ('<=>', '<--', '-->', '<-'):
        if arrow in formula:
            left, right = formula.split(arrow)
            break
    else:
        raise ValueError(f"Unrecognized reaction format: {formula}")

    reactants = parse_metabolites_side(left.strip())
    products = parse_metabolites_side(right.strip())

    metabolites_to_add = {}

    for met_id, coeff in reactants.items():
        if met_id in metabolites_dict:
            metabolites_to_add[metabolites_dict[met_id]] = -coeff

    for met_id, coeff in products.items():
        if met_id in metabolites_dict:
            metabolites_to_add[metabolites_dict[met_id]] = coeff

    reaction.add_metabolites(metabolites_to_add)
|
|
320
|
|
321
|
|
def parse_metabolites_side(side_str: str) -> Dict[str, float]:
    """Parse one side of a reaction; return {metabolite_id: coefficient}."""
    parsed: Dict[str, float] = {}
    if not side_str or not side_str.strip():
        return parsed

    # Terms are '+'-separated: an optional numeric coefficient followed by an
    # ID ending in _<compartment>. A missing coefficient means 1.0.
    term_re = re.compile(r'(?:(\d+\.?\d*)\s+)?([A-Za-z0-9_]+_[a-z]+)')

    for raw_term in side_str.split('+'):
        term = raw_term.strip()
        if not term:
            continue
        hit = term_re.match(term)
        if hit is None:
            continue
        coeff_text, met_id = hit.groups()
        parsed[met_id] = float(coeff_text) if coeff_text else 1.0

    return parsed
|
|
342
|
|
343
|
|
344
|
430
|
def set_objective_from_csv(model: cobra.Model, df: pd.DataFrame, obj_col: str = "ObjectiveCoefficient"):
    """
    Set the model's objective from a coefficient column of the dataframe.

    Any reaction(s) may carry a non-zero coefficient — not necessarily biomass.

    Raises:
        ValueError: if no reaction has a non-zero objective coefficient.
    """
    objective = {}

    for _, row in df.iterrows():
        rxn_id = str(row['ReactionID']).strip()
        coefficient = float(row[obj_col]) if pd.notna(row[obj_col]) else 0.0
        if coefficient == 0:
            continue
        if rxn_id not in model.reactions:
            print(f"Warning: reaction {rxn_id} not found in model, skipping for objective.")
            continue
        objective[model.reactions.get_by_id(rxn_id)] = coefficient

    if not objective:
        raise ValueError("No reactions found with non-zero objective coefficient.")

    model.objective = objective
    print(f"Objective set with {len(objective)} reactions.")
|
|
366
|
|
367
|
419
|
368
|
|
369
|
|
def set_medium_from_data(model: cobraModel, df: pd.DataFrame):
    """
    Set the medium based on the 'InMedium' column in the dataframe.

    Only uptake reactions (lower_bound < 0) are included; their medium value is
    the absolute lower bound. Does nothing when no medium reaction qualifies.
    """
    medium_reactions = df[df['InMedium'] == True]['ReactionID'].tolist()

    # PERF FIX: build the ID set once instead of rebuilding a list per lookup
    # (the original `rxn_id in [r.id for r in model.reactions]` was O(n^2)).
    model_rxn_ids = {r.id for r in model.reactions}

    medium_dict = {}
    for rxn_id in medium_reactions:
        if rxn_id in model_rxn_ids:
            reaction = model.reactions.get_by_id(rxn_id)
            if reaction.lower_bound < 0:  # uptake reactions only
                medium_dict[rxn_id] = abs(reaction.lower_bound)

    if medium_dict:
        model.medium = medium_dict
        print(f"Medium set with {len(medium_dict)} components")
|
419
|
384
|
|
385
|
|
def validate_model(model: cobraModel) -> Dict[str, Any]:
    """
    Validate the model and return basic statistics.

    Runs a quick FBA optimization as a growth test; solver failures are
    captured in the 'status' field instead of propagating.

    Returns:
        Dict[str, Any]: counts of reactions/metabolites/genes/compartments,
        objective string, medium size, reversible/exchange reaction counts,
        plus 'growth_rate' and 'status' from the optimization attempt.
    """
    # FIX: return annotation previously used the builtin `any` instead of
    # typing.Any (relies on the updated typing import at the top of the file).
    validation = {
        'num_reactions': len(model.reactions),
        'num_metabolites': len(model.metabolites),
        'num_genes': len(model.genes),
        'num_compartments': len(model.compartments),
        'objective': str(model.objective),
        'medium_size': len(model.medium),
        'reversible_reactions': len([r for r in model.reactions if r.reversibility]),
        # Exchange reactions identified by the conventional 'EX_' prefix.
        'exchange_reactions': len([r for r in model.reactions if r.id.startswith('EX_')]),
    }

    try:
        # Growth test
        solution = model.optimize()
        validation['growth_rate'] = solution.objective_value
        validation['status'] = solution.status
    except Exception as e:
        validation['growth_rate'] = None
        validation['status'] = f"Error: {e}"

    return validation
|
|
409
|
456
|
def convert_genes(model, annotation):
    """
    Rename genes using a selected annotation key in gene.notes.

    Args:
        model: COBRA model; a copy is made and the original is untouched.
        annotation: Key to look up in each gene's `notes` dict.

    Returns:
        A renamed copy of the model, or -1 when any gene lacks the annotation
        key (the -1 sentinel is kept for backward compatibility with callers).
    """
    from cobra.manipulation import rename_genes
    model2 = model.copy()
    try:
        dict_genes = {gene.id: gene.notes[annotation] for gene in model2.genes}
    except KeyError:
        # FIX: narrowed from a bare `except:` — only a missing annotation key
        # is an expected failure here; anything else should propagate.
        print("No annotation in gene dict!")
        return -1
    rename_genes(model2, dict_genes)

    return model2
|
|
422
|
|
423 # ---------- Utility helpers ----------
|
|
def _normalize_colname(col: str) -> str:
    """Normalize a column name: trimmed, lower-cased, spaces replaced by underscores."""
    normalized = col.strip().lower()
    return normalized.replace(' ', '_')
|
|
426
|
|
def _choose_columns(mapping_df: 'pd.DataFrame') -> Dict[str, str]:
    """
    Detect useful mapping columns and return {category: actual_column_name}.

    Categories without a matching column are simply absent from the result
    (the caller decides whether an empty result is an error).
    """
    normalized_to_actual = { _normalize_colname(c): c for c in mapping_df.columns }
    # Candidate (normalized) names for each category, in preference order.
    candidates = {
        'ensg': ['ensg', 'ensembl_gene_id', 'ensembl'],
        'hgnc_id': ['hgnc_id', 'hgnc', 'hgnc:'],
        'hgnc_symbol': ['hgnc_symbol', 'hgnc symbol', 'symbol'],
        'entrez_id': ['entrez', 'entrez_id', 'entrezgene'],
        'gene_number': ['gene_number']
    }
    chosen = {}
    for category, names in candidates.items():
        hit = next((n for n in names if n in normalized_to_actual), None)
        if hit is not None:
            chosen[category] = normalized_to_actual[hit]
    return chosen
|
|
448
|
|
def _validate_target_uniqueness(mapping_df: 'pd.DataFrame',
                                source_col: str,
                                target_col: str,
                                model_source_genes: Optional[Set[str]] = None,
                                logger: Optional[logging.Logger] = None) -> None:
    """
    Check that, within the filtered mapping_df, each target maps to at most one source.

    Logs a warning with examples when duplicates are found, and an info message
    only when validation passes. Purely diagnostic — never raises.
    """
    if logger is None:
        logger = logging.getLogger(__name__)

    if mapping_df.empty:
        logger.warning("Mapping dataframe is empty for the requested source genes; skipping uniqueness validation.")
        return

    # Normalize into temporary columns for grouping (without altering the original df).
    tmp = mapping_df[[source_col, target_col]].copy()
    tmp['_src_norm'] = tmp[source_col].astype(str).map(_normalize_gene_id)
    tmp['_tgt_norm'] = tmp[target_col].astype(str).str.strip()

    # Optionally restrict to the source genes actually present in the model.
    if model_source_genes is not None:
        tmp = tmp[tmp['_src_norm'].isin(model_source_genes)]

    if tmp.empty:
        logger.warning("After filtering to model source genes, mapping table is empty — nothing to validate.")
        return

    # Reverse mapping: target -> set(sources); a target with >1 source is ambiguous.
    grouped = tmp.groupby('_tgt_norm')['_src_norm'].agg(lambda s: set(s.dropna()))
    problematic = {t: sorted(list(s)) for t, s in grouped.items() if len(s) > 1}

    if problematic:
        msg_lines = ["Mapping validation failed: some target IDs are associated with multiple source IDs."]
        for tgt, sources in problematic.items():
            msg_lines.append(f" - target '{tgt}' <- sources: {', '.join(sources)}")
        logger.warning("\n".join(msg_lines))
        # BUG FIX: the original fell through here and ALSO logged
        # "Mapping validation passed", contradicting the warning above.
        return

    # if everything is fine
    logger.info("Mapping validation passed: no target ID is associated with multiple source IDs (within filtered set).")
|
|
495
|
|
496
|
|
def _normalize_gene_id(g: str) -> str:
    """Normalize a gene ID for use as a key: strip whitespace and drop 'HGNC:'/'ENSG:' prefixes (case-insensitive)."""
    if g is None:
        return ""
    normalized = str(g).strip()
    # Remove common database prefixes, one pattern at a time.
    for prefix_pattern in (r'^(HGNC:)', r'^(ENSG:)'):
        normalized = re.sub(prefix_pattern, '', normalized, flags=re.IGNORECASE)
    return normalized
|
|
506
|
455
|
def _simplify_boolean_expression(expr: str) -> str:
    """
    Simplify a boolean expression by removing duplicates and redundancies.

    Handles expressions with 'and' and 'or'. Deduplication is purely textual:
    identical tokens/subexpressions joined by the same operator are collapsed,
    preserving first-seen order. On any parsing error the original expression
    is returned unchanged.

    NOTE(review): the parenthesis-handling step replaces '(inner)' with the
    processed inner text WITHOUT re-adding parentheses, which can change
    operator precedence for mixed and/or expressions — confirm this is the
    intended behavior for the GPRs this is applied to.
    """
    if not expr or not expr.strip():
        return expr

    # Normalize operator keywords to lowercase so 'AND'/'OR' are recognized below.
    expr = expr.replace(' AND ', ' and ').replace(' OR ', ' or ')

    # Recursive helper: simplifies one (sub)expression string.
    def process_expression(s: str) -> str:
        s = s.strip()
        if not s:
            return s

        # Repeatedly find an innermost '(...)' pair, simplify its content, and
        # splice the result back in (dropping the parentheses — see NOTE above).
        while '(' in s:
            start = -1
            for i, c in enumerate(s):
                if c == '(':
                    start = i
                elif c == ')' and start != -1:
                    inner = s[start+1:i]
                    processed_inner = process_expression(inner)
                    s = s[:start] + processed_inner + s[i+1:]
                    break
            else:
                # Unbalanced '(' with no matching ')': stop rewriting.
                break

        # Split into OR operands at the top level only (paren_count tracks
        # parentheses glued to tokens, e.g. '(A' or 'B)').
        or_parts = []
        current_part = ""
        paren_count = 0

        tokens = s.split()
        i = 0
        while i < len(tokens):
            token = tokens[i]
            if token == 'or' and paren_count == 0:
                if current_part.strip():
                    or_parts.append(current_part.strip())
                current_part = ""
            else:
                if token.count('(') > token.count(')'):
                    paren_count += token.count('(') - token.count(')')
                elif token.count(')') > token.count('('):
                    paren_count -= token.count(')') - token.count('(')
                current_part += token + " "
            i += 1

        if current_part.strip():
            or_parts.append(current_part.strip())

        # Within each OR operand, split into AND operands the same way.
        processed_or_parts = []
        for or_part in or_parts:
            and_parts = []
            current_and = ""
            paren_count = 0

            and_tokens = or_part.split()
            j = 0
            while j < len(and_tokens):
                token = and_tokens[j]
                if token == 'and' and paren_count == 0:
                    if current_and.strip():
                        and_parts.append(current_and.strip())
                    current_and = ""
                else:
                    if token.count('(') > token.count(')'):
                        paren_count += token.count('(') - token.count(')')
                    elif token.count(')') > token.count('('):
                        paren_count -= token.count(')') - token.count('(')
                    current_and += token + " "
                j += 1

            if current_and.strip():
                and_parts.append(current_and.strip())

            # Deduplicate AND operands (dict.fromkeys preserves first-seen order).
            unique_and_parts = list(dict.fromkeys(and_parts))

            if len(unique_and_parts) == 1:
                processed_or_parts.append(unique_and_parts[0])
            elif len(unique_and_parts) > 1:
                processed_or_parts.append(" and ".join(unique_and_parts))

        # Deduplicate OR operands the same way.
        unique_or_parts = list(dict.fromkeys(processed_or_parts))

        if len(unique_or_parts) == 1:
            return unique_or_parts[0]
        elif len(unique_or_parts) > 1:
            return " or ".join(unique_or_parts)
        else:
            return ""

    try:
        return process_expression(expr)
    except Exception:
        # If simplification fails for any reason, fall back to the input unchanged.
        return expr
|
|
614
|
426
|
615 # ---------- Main public function ----------
|
|
def translate_model_genes(model: 'cobra.Model',
                          mapping_df: 'pd.DataFrame',
                          target_nomenclature: str,
                          source_nomenclature: str = 'hgnc_id',
                          allow_many_to_one: bool = False,
                          logger: Optional[logging.Logger] = None) -> 'cobra.Model':
    """
    Translate model genes from source_nomenclature to target_nomenclature using a mapping table.

    mapping_df should contain columns enabling mapping (e.g., ensg, hgnc_id, hgnc_symbol, entrez).

    Args:
        model: COBRA model to translate (a copy is returned; the input is untouched).
        mapping_df: DataFrame containing the mapping information.
        target_nomenclature: Desired target key (e.g., 'hgnc_symbol').
        source_nomenclature: Current source key in the model (default 'hgnc_id').
        allow_many_to_one: If True, allow many-to-one mappings and handle duplicates in GPRs.
        logger: Optional logger; a basicConfig-backed one is created when omitted.

    Returns:
        A copy of the model with translated GPRs and a rebuilt gene list.

    Raises:
        ValueError: if no usable columns are detected, or the requested source/
            target column cannot be located in mapping_df.
    """
    if logger is None:
        logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
        logger = logging.getLogger(__name__)

    logger.info(f"Translating genes from '{source_nomenclature}' to '{target_nomenclature}'")

    # Normalize column names and detect which mapping categories are available.
    chosen = _choose_columns(mapping_df)
    if not chosen:
        raise ValueError("Could not detect useful columns in mapping_df. Expected at least one of: ensg, hgnc_id, hgnc_symbol, entrez.")

    # Map the user-requested source/target keys onto actual dataframe columns.
    # Normalize input args first.
    src_key = source_nomenclature.strip().lower()
    tgt_key = target_nomenclature.strip().lower()

    col_for_src = None
    col_for_tgt = None
    # First, try an exact category-name match.
    for k, actual in chosen.items():
        if k == src_key:
            col_for_src = actual
        if k == tgt_key:
            col_for_tgt = actual

    # If not found, fall back to substring matching against category names.
    if col_for_src is None:
        possible_src_names = {k: v for k, v in chosen.items()}
        for k, actual in possible_src_names.items():
            if src_key in k:
                col_for_src = actual
                break

    if col_for_tgt is None:
        for k, actual in chosen.items():
            if tgt_key in k:
                col_for_tgt = actual
                break

    if col_for_src is None:
        raise ValueError(f"Source column for '{source_nomenclature}' not found in mapping dataframe.")
    if col_for_tgt is None:
        raise ValueError(f"Target column for '{target_nomenclature}' not found in mapping dataframe.")

    # Restrict the mapping to genes actually present in the model (normalized IDs).
    model_source_genes = { _normalize_gene_id(g.id) for g in model.genes }
    logger.info(f"Filtering mapping to {len(model_source_genes)} source genes present in model (normalized).")

    tmp_map = mapping_df[[col_for_src, col_for_tgt]].dropna().copy()
    tmp_map[col_for_src + "_norm"] = tmp_map[col_for_src].astype(str).map(_normalize_gene_id)

    filtered_map = tmp_map[tmp_map[col_for_src + "_norm"].isin(model_source_genes)].copy()

    if filtered_map.empty:
        logger.warning("No mapping rows correspond to source genes present in the model after filtering. Proceeding with empty mapping (no translation will occur).")

    # Uniqueness validation is diagnostic only (logs, never raises).
    if not allow_many_to_one:
        _validate_target_uniqueness(filtered_map, col_for_src, col_for_tgt, model_source_genes=model_source_genes, logger=logger)

    # Create the mapping: source_id -> [target_ids]
    gene_mapping = _create_gene_mapping(filtered_map, col_for_src, col_for_tgt, logger)

    # Work on a copy so the caller's model is untouched.
    model_copy = model.copy()

    # Translation statistics accumulated by _translate_gpr.
    stats = {'translated': 0, 'one_to_one': 0, 'one_to_many': 0, 'not_found': 0, 'simplified_gprs': 0}
    unmapped = []
    multi = []

    original_genes = {g.id for g in model_copy.genes}
    logger.info(f"Original genes count: {len(original_genes)}")

    # Rewrite each reaction's GPR, simplifying duplicates introduced by 1:many mappings.
    for rxn in model_copy.reactions:
        gpr = rxn.gene_reaction_rule
        if gpr and gpr.strip():
            new_gpr = _translate_gpr(gpr, gene_mapping, stats, unmapped, multi, logger)
            if new_gpr != gpr:
                simplified_gpr = _simplify_boolean_expression(new_gpr)
                if simplified_gpr != new_gpr:
                    stats['simplified_gprs'] += 1
                    logger.debug(f"Simplified GPR for {rxn.id}: '{new_gpr}' -> '{simplified_gpr}'")
                rxn.gene_reaction_rule = simplified_gpr
                logger.debug(f"Reaction {rxn.id}: '{gpr}' -> '{simplified_gpr}'")

    # Rebuild model.genes to reflect the rewritten GPRs.
    _update_model_genes(model_copy, logger)

    # Final summary logging.
    _log_translation_statistics(stats, unmapped, multi, original_genes, model_copy.genes, logger)

    logger.info("Translation finished")
    return model_copy
|
|
729
|
|
730
|
|
731 # ---------- helper functions ----------
|
|
def _create_gene_mapping(mapping_df, source_col: str, target_col: str, logger: logging.Logger) -> Dict[str, List[str]]:
    """
    Build mapping dict: source_id -> list of target_ids.

    Source IDs are normalized (prefixes such as 'HGNC:' removed); duplicate
    pairs are dropped and target order is first-seen.
    """
    pairs = mapping_df[[source_col, target_col]].dropna().copy()
    pairs[source_col] = pairs[source_col].astype(str).map(_normalize_gene_id)
    pairs[target_col] = pairs[target_col].astype(str).str.strip()
    pairs = pairs.drop_duplicates()

    logger.info(f"Creating mapping from {len(pairs)} rows")

    mapping = defaultdict(list)
    for _, pair in pairs.iterrows():
        source_id = pair[source_col]
        target_id = pair[target_col]
        if target_id not in mapping[source_id]:
            mapping[source_id].append(target_id)

    # Summary statistics for the log.
    one_to_one = sum(1 for targets in mapping.values() if len(targets) == 1)
    one_to_many = sum(1 for targets in mapping.values() if len(targets) > 1)
    logger.info(f"Mapping: {len(mapping)} source keys, {one_to_one} 1:1, {one_to_many} 1:many")
    return dict(mapping)
|
|
758
|
|
759
|
|
def _translate_gpr(gpr_string: str,
                   gene_mapping: Dict[str, List[str]],
                   stats: Dict[str, int],
                   unmapped_genes: List[str],
                   multi_mapping_genes: List[Tuple[str, List[str]]],
                   logger: logging.Logger) -> str:
    """
    Translate gene tokens inside a GPR string using gene_mapping.

    Updates stats/unmapped_genes/multi_mapping_genes in place; tokens with no
    mapping are left as-is. Returns the rewritten GPR string.
    """
    # Generic token pattern: letters, digits, :, _, -, . (captures HGNC:1234, ENSG000..., symbols)
    logical_tokens = {'and', 'or', 'AND', 'OR', '(', ')'}
    raw_tokens = re.findall(r'\b[A-Za-z0-9:_.-]+\b', gpr_string)
    gene_tokens = [t for t in raw_tokens if t not in logical_tokens]

    new_gpr = gpr_string

    # Replace longer tokens first so a token that is a prefix of another cannot
    # clobber it mid-replacement.
    for token in sorted(set(gene_tokens), key=len, reverse=True):
        norm = _normalize_gene_id(token)
        if norm not in gene_mapping:
            stats['not_found'] += 1
            if token not in unmapped_genes:
                unmapped_genes.append(token)
            logger.debug(f"Token not found in mapping (left as-is): {token}")
            continue

        targets = gene_mapping[norm]
        stats['translated'] += 1
        if len(targets) == 1:
            stats['one_to_one'] += 1
            replacement = targets[0]
        else:
            stats['one_to_many'] += 1
            multi_mapping_genes.append((token, targets))
            # 1:many expands to an OR-group of all targets.
            replacement = "(" + " or ".join(targets) + ")"

        new_gpr = re.sub(r'\b' + re.escape(token) + r'\b', replacement, new_gpr)

    return new_gpr
|
|
801
|
|
802
|
|
def _update_model_genes(model: 'cobra.Model', logger: logging.Logger):
    """
    Rebuild model.genes from gene_reaction_rule content.

    Removes genes no longer referenced by any GPR and adds genes that appear
    in GPRs but are missing from model.genes. Gene IDs are normalized the same
    way as the mapping keys (see _normalize_gene_id).
    """
    # Collect every gene token referenced by any reaction's GPR.
    gene_pattern = r'\b[A-Za-z0-9:_.-]+\b'
    logical = {'and', 'or', 'AND', 'OR', '(', ')'}
    genes_in_gpr: Set[str] = set()

    for rxn in model.reactions:
        gpr = rxn.gene_reaction_rule
        if gpr and gpr.strip():
            toks = re.findall(gene_pattern, gpr)
            toks = [t for t in toks if t not in logical]
            # Normalize IDs consistent with mapping normalization.
            toks = [_normalize_gene_id(t) for t in toks]
            genes_in_gpr.update(toks)

    # Snapshot of current gene IDs before mutation.
    existing = {g.id for g in model.genes}

    # Remove genes that no GPR references anymore.
    to_remove = [gid for gid in existing if gid not in genes_in_gpr]
    removed = 0
    for gid in to_remove:
        try:
            gene_obj = model.genes.get_by_id(gid)
            model.genes.remove(gene_obj)
            removed += 1
        except Exception:
            # Best-effort removal: a gene that cannot be removed is left in place.
            pass

    # Add genes referenced in GPRs but absent from model.genes.
    added = 0
    for gid in genes_in_gpr:
        if gid not in existing:
            new_gene = cobra.Gene(gid)
            # Different cobra versions expose different container APIs, so try
            # add -> append -> add_genes in order.
            try:
                model.genes.add(new_gene)
            except Exception:
                try:
                    model.genes.append(new_gene)
                except Exception:
                    try:
                        model.add_genes([new_gene])
                    except Exception:
                        logger.warning(f"Could not add gene object for {gid}")
            # NOTE(review): `added` is incremented even when every fallback
            # failed, so the logged count may overstate actual additions.
            added += 1

    logger.info(f"Model genes updated: removed {removed}, added {added}")
|
|
856
|
|
857
|
|
def _log_translation_statistics(stats: Dict[str, int],
                                unmapped_genes: List[str],
                                multi_mapping_genes: List[Tuple[str, List[str]]],
                                original_genes: Set[str],
                                final_genes,
                                logger: logging.Logger):
    """Log a summary of the GPR translation: counts, unmapped tokens, and multi-mapping examples."""
    logger.info("=== TRANSLATION STATISTICS ===")
    logger.info(f"Translated: {stats.get('translated', 0)} (1:1 = {stats.get('one_to_one', 0)}, 1:many = {stats.get('one_to_many', 0)})")
    logger.info(f"Not found tokens: {stats.get('not_found', 0)}")
    logger.info(f"Simplified GPRs: {stats.get('simplified_gprs', 0)}")

    final_ids = {g.id for g in final_genes}
    logger.info(f"Genes in model: {len(original_genes)} -> {len(final_ids)}")

    if unmapped_genes:
        # Show at most 20 unmapped tokens, with an ellipsis when truncated.
        sample = ', '.join(unmapped_genes[:20])
        suffix = ' ...' if len(unmapped_genes) > 20 else ''
        logger.warning(f"Unmapped tokens ({len(unmapped_genes)}): {sample}{suffix}")
    if multi_mapping_genes:
        logger.info(f"Multi-mapping examples ({len(multi_mapping_genes)}):")
        # Show at most 10 examples of 1:many translations.
        for orig, targets in multi_mapping_genes[:10]:
            logger.info(f"  {orig} -> {', '.join(targets)}")