comparison COBRAxy/flux_to_map.py @ 211:4e368ecd4fb6 draft
Uploaded
field | value |
---|---|
author | francesco_lapi |
date | Thu, 28 Nov 2024 15:41:30 +0000 |
parents | df664d1a86d4 |
children | ba7043091fe3 |
comparison
210:3ca179b83574 | 211:4e368ecd4fb6 |
---|---|
725 p_value, z_score = computePValue(l1, l2) | 725 p_value, z_score = computePValue(l1, l2) |
726 avg1 = sum(l1) / len(l1) | 726 avg1 = sum(l1) / len(l1) |
727 avg2 = sum(l2) / len(l2) | 727 avg2 = sum(l2) / len(l2) |
728 f_c = fold_change(avg1, avg2) | 728 f_c = fold_change(avg1, avg2) |
729 if not isinstance(z_score, str) and max_z_score < abs(z_score): max_z_score = abs(z_score) | 729 if not isinstance(z_score, str) and max_z_score < abs(z_score): max_z_score = abs(z_score) |
730 print(reactId, 'pValue ', float(p_value), 'fold change ', f_c, 'z_score ', z_score, 'avg1 ', avg1, 'avg2 ', avg2) | 730 |
731 tmp[reactId] = [float(p_value), f_c, z_score, avg1, avg2] | 731 tmp[reactId] = [float(p_value), f_c, z_score, avg1, avg2] |
732 except (TypeError, ZeroDivisionError): continue | 732 except (TypeError, ZeroDivisionError): continue |
733 | 733 |
734 return tmp, max_z_score | 734 return tmp, max_z_score |
735 | 735 |
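For context, the hunk above is inside `compareDatasetPair`, which walks the shared reaction IDs, tests the two flux-sample lists against each other, and stores per-reaction statistics (p-value, fold change, z-score, and the two means); the only change in this revision is dropping the debug `print`. The following is a minimal, self-contained sketch of that loop under stated assumptions: `compute_p_value` and `fold_change` are simplified stand-ins (a plain two-sample z-test and a ratio), not the script's real helpers, and the two datasets are assumed to be dicts mapping reaction IDs to lists of flux samples.

```python
import math
import statistics

def compute_p_value(l1, l2):
    """Stand-in for the script's computePValue: a plain two-sample z-test
    on the means. The real helper may use a different statistical test."""
    m1, m2 = statistics.mean(l1), statistics.mean(l2)
    se = math.sqrt(statistics.pvariance(l1) / len(l1) +
                   statistics.pvariance(l2) / len(l2))
    z = (m1 - m2) / se  # ZeroDivisionError here is caught by the caller
    p = 2.0 * (1.0 - 0.5 * (1.0 + math.erf(abs(z) / math.sqrt(2.0))))  # two-sided
    return p, z

def fold_change(avg1, avg2):
    """Stand-in fold change (simple ratio); the script's own definition may differ."""
    return avg1 / avg2

def compare_dataset_pair(dataset1, dataset2, ids):
    """Per-reaction statistics, mirroring the loop shown in the hunk above."""
    tmp, max_z_score = {}, 0
    for react_id in ids:
        l1, l2 = dataset1[react_id], dataset2[react_id]
        try:
            p_value, z_score = compute_p_value(l1, l2)
            avg1, avg2 = sum(l1) / len(l1), sum(l2) / len(l2)
            f_c = fold_change(avg1, avg2)
            # the isinstance guard mirrors the original, which may receive a
            # string sentinel instead of a numeric z-score
            if not isinstance(z_score, str) and max_z_score < abs(z_score):
                max_z_score = abs(z_score)
            tmp[react_id] = [float(p_value), f_c, z_score, avg1, avg2]
        except (TypeError, ZeroDivisionError):
            continue
    return tmp, max_z_score
```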
757 enrichment_results = [] | 757 enrichment_results = [] |
758 | 758 |
759 | 759 |
760 if ARGS.comparison == "manyvsmany": | 760 if ARGS.comparison == "manyvsmany": |
761 for i, j in it.combinations(class_pat.keys(), 2): | 761 for i, j in it.combinations(class_pat.keys(), 2): |
762 print(f"Comparing {i} and {j}") | 762 |
763 comparisonDict, max_z_score = compareDatasetPair(class_pat.get(i), class_pat.get(j), ids) | 763 comparisonDict, max_z_score = compareDatasetPair(class_pat.get(i), class_pat.get(j), ids) |
764 enrichment_results.append((i, j, comparisonDict, max_z_score)) | 764 enrichment_results.append((i, j, comparisonDict, max_z_score)) |
765 | 765 |
766 elif ARGS.comparison == "onevsrest": | 766 elif ARGS.comparison == "onevsrest": |
767 for single_cluster in class_pat.keys(): | 767 for single_cluster in class_pat.keys(): |
768 rest = [item for k, v in class_pat.items() if k != single_cluster for item in v] | 768 rest = [item for k, v in class_pat.items() if k != single_cluster for item in v] |
769 print(f"Comparing {single_cluster} and {rest}") | 769 |
770 comparisonDict, max_z_score = compareDatasetPair(class_pat.get(single_cluster), rest, ids) | 770 comparisonDict, max_z_score = compareDatasetPair(class_pat.get(single_cluster), rest, ids) |
771 enrichment_results.append((single_cluster, "rest", comparisonDict, max_z_score)) | 771 enrichment_results.append((single_cluster, "rest", comparisonDict, max_z_score)) |
772 | 772 |
773 elif ARGS.comparison == "onevsmany": | 773 elif ARGS.comparison == "onevsmany": |
774 controlItems = class_pat.get(ARGS.control) | 774 controlItems = class_pat.get(ARGS.control) |
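The second hunk sits in the driver that decides which dataset pairs are handed to `compareDatasetPair`; again, only the debug `print` calls are removed. Below is a hedged sketch of that pairing logic. `class_pat` is assumed to map class names to their flux samples, and the `onevsmany` branch is filled in with the natural reading (control class versus each remaining class), since the hunk is cut off right after `controlItems` is fetched.

```python
import itertools as it

def build_comparisons(class_pat, comparison, control=None):
    """Sketch of the pairing step feeding compareDatasetPair.

    class_pat  -- dict mapping class name -> flux samples (assumed shape)
    comparison -- "manyvsmany", "onevsrest" or "onevsmany" (mirrors ARGS.comparison)
    control    -- name of the control class, used only by "onevsmany"
    """
    pairs = []
    if comparison == "manyvsmany":
        # every unordered pair of classes
        for i, j in it.combinations(class_pat.keys(), 2):
            pairs.append((i, j, class_pat[i], class_pat[j]))
    elif comparison == "onevsrest":
        # each class against the pooled samples of all other classes
        for single_cluster in class_pat:
            rest = [item for k, v in class_pat.items()
                    if k != single_cluster for item in v]
            pairs.append((single_cluster, "rest", class_pat[single_cluster], rest))
    elif comparison == "onevsmany":
        # assumed continuation: the control class against each remaining class
        control_items = class_pat.get(control)
        for name, items in class_pat.items():
            if name != control:
                pairs.append((control, name, control_items, items))
    return pairs
```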