GCMS/combine_output.py @ 15:05ff1c55db84

summary:  fix for rankfilter, removed pfd read functional
author:   linda.bakker@wur.nl <linda.bakker@wur.nl>
date:     Fri, 20 Mar 2015 17:11:04 +0100
parents:  346ff9ad8c7a
children: fe4682eb938c
#!/usr/bin/env python
# encoding: utf-8
'''
Module to combine output from two GCMS Galaxy tools (RankFilter and CasLookup)
'''

import csv
import re
import sys
import math
import pprint

__author__ = "Marcel Kempenaar"
__contact__ = "brs@nbic.nl"
__copyright__ = "Copyright, 2012, Netherlands Bioinformatics Centre"
__license__ = "MIT"

def _process_data(in_csv):
    '''
    Generic method to parse a tab-separated file returning a dictionary with named columns
    @param in_csv: input filename to be parsed
    '''
    data = list(csv.reader(open(in_csv, 'rU'), delimiter='\t'))
    header = data.pop(0)
    # Create dictionary with column name as key
    output = {}
    for index in xrange(len(header)):
        output[header[index]] = [row[index] for row in data]
    return output
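
# Illustrative example (hypothetical values, only meant to clarify the returned data structure):
# for a tab-separated input whose header row is 'ID<TAB>R.T.<TAB>Name' followed by two data
# rows, _process_data returns a column-oriented dict such as
#   {'ID': ['74-1', '74-2'], 'R.T.': ['5.41', '5.43'], 'Name': ['Alanine', 'Glycine']}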


def _merge_data(rankfilter, caslookup):
    '''
    Merges data from both input dictionaries based on the Centrotype field. This method will
    build up a new list containing the merged hits as the items.
    @param rankfilter: dictionary holding RankFilter output in the form of N lists (one list per attribute name)
    @param caslookup: dictionary holding CasLookup output in the form of N lists (one list per attribute name)
    '''
    # TODO: test for correct input files -> the rankfilter and caslookup internal lists should have the same lengths:
    if len(rankfilter['ID']) != len(caslookup['Centrotype']):
        raise Exception('rankfilter and caslookup files should have the same nr of rows/records')

    merged = []
    processed = {}
    for compound_id_idx in xrange(len(rankfilter['ID'])):
        compound_id = rankfilter['ID'][compound_id_idx]
        if compound_id not in processed:
            # keep track of processed items to not repeat them
            processed[compound_id] = compound_id
            # get centrotype nr
            centrotype = compound_id.split('-')[0]
            # Get the indices for the current compound ID in both data structures for proper matching
            rindex = [index for index, value in enumerate(rankfilter['ID']) if value == compound_id]
            cindex = [index for index, value in enumerate(caslookup['Centrotype']) if value == centrotype]

            merged_hits = []
            # Combine hits
            for hit in xrange(len(rindex)):
                # Create records of the hits to be merged ("keys" are the attribute names, so what the lines
                # below do is create a new "dict" item with the same "keys"/attributes, with each attribute
                # filled with its corresponding value in the rankfilter or caslookup tables; i.e.
                # rankfilter[key] returns the list/array of size nr-of-rows holding the values for the
                # attribute represented by "key", and rindex[hit] points to the row with index "hit"
                rf_record = dict(zip(rankfilter.keys(), [rankfilter[key][rindex[hit]] for key in rankfilter.keys()]))
                cl_record = dict(zip(caslookup.keys(), [caslookup[key][cindex[hit]] for key in caslookup.keys()]))

                merged_hit = _add_hit(rf_record, cl_record)
                merged_hits.append(merged_hit)

            merged.append(merged_hits)

    return merged, len(rindex)
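
# Note (clarifying comment): the centrotype is the part of the RankFilter 'ID' value before the
# first '-', so all RankFilter rows sharing one ID are paired, hit by hit, with the CasLookup
# rows whose 'Centrotype' column holds that value. The second return value, len(rindex), is the
# hit count of the last compound processed; _save_data uses it as the expected (maximum) number
# of hits per centrotype.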


def _add_hit(rankfilter, caslookup):
    '''
    Combines single records from both the RankFilter- and CasLookup-tools
    @param rankfilter: record (dictionary) of one compound in the RankFilter output
    @param caslookup: matching record (dictionary) of one compound in the CasLookup output
    '''
    # The ID in the RankFilter output contains the following 5 fields:
    rf_id = rankfilter['ID'].split('-')
    try:
        hit = [rf_id[0],  # Centrotype
               rf_id[1],  # cent.Factor
               rf_id[2],  # scan nr
               rf_id[3],  # R.T. (umin)
               rf_id[4],  # nr. Peaks
               # Appending other fields
               rankfilter['R.T.'],
               rankfilter['Name'],
               rankfilter['Formula'],
               rankfilter['Library'].strip(),
               rankfilter['CAS'].strip(),
               rankfilter['Forward'],
               rankfilter['Reverse'],
               ((float(rankfilter['Forward']) + float(rankfilter['Reverse'])) / 2),
               rankfilter['RIexp'],
               caslookup['RI'],
               rankfilter['RIsvr'],
               # Calculate absolute differences
               math.fabs(float(rankfilter['RIexp']) - float(rankfilter['RIsvr'])),
               math.fabs(float(caslookup['RI']) - float(rankfilter['RIexp'])),
               caslookup['Regression.Column.Name'],
               caslookup['min'],
               caslookup['max'],
               caslookup['nr.duplicates'],
               caslookup['Column.phase.type'],
               caslookup['Column.name'],
               rankfilter['Rank'],
               rankfilter['%rel.err'],
               rankfilter['Synonyms']]
    except KeyError as error:
        print "Problem reading in data from input file(s):\n",
        print "Respective CasLookup entry: \n", pprint.pprint(caslookup), "\n"
        print "Respective RankFilter entry: \n", pprint.pprint(rankfilter), "\n"
        raise error

    return hit
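
# Illustrative example (hypothetical ID, shown only to clarify the split above): a RankFilter
# 'ID' such as '12-0.98-1347-5.41-3' yields Centrotype '12', cent.Factor '0.98', scan nr '1347',
# R.T. (umin) '5.41' and nr. Peaks '3', after which the remaining RankFilter and CasLookup
# attributes are appended to form a single output row.
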
def _get_default_caslookup():
    '''
    The CasLookup tool might not have found all compounds in the library searched;
    this default dict will be used to combine with the RankFilter output
    '''
    return {'FORMULA': 'N/A',
            'RI': '0.0',
            'Regression.Column.Name': 'None',
            'min': '0.0',
            'max': '0.0',
            'nr.duplicates': '0',
            'Column.phase.type': 'N/A',
            'Column.name': 'N/A'}


def _save_data(data, nhits, out_csv_single, out_csv_multi):
    '''
    Writes tab-separated data to file
    @param data: list with one entry per centrotype, each entry holding the merged hits for that centrotype
    @param nhits: expected (maximum) number of hits per centrotype
    @param out_csv_single: output csv file with one hit per line
    @param out_csv_multi: output csv file with all hits of a centrotype combined on one line
    '''
    # Columns we don't repeat:
    header_part1 = ['Centrotype',
                    'cent.Factor',
                    'scan nr.',
                    'R.T. (umin)',
                    'nr. Peaks',
                    'R.T.']
    # These are the headers/columns we repeat in case of
    # combining hits in one line (see alternative_headers method below):
    header_part2 = [
        'Name',
        'FORMULA',
        'Library',
        'CAS',
        'Forward',
        'Reverse',
        'Avg. (Forward, Reverse)',
        'RIexp',
        'RI',
        'RIsvr',
        'RIexp - RIsvr',
        'RI - RIexp',
        'Regression.Column.Name',
        'min',
        'max',
        'nr.duplicates',
        'Column.phase.type',
        'Column.name',
        'Rank',
        '%rel.err',
        'Synonyms']

    # Open output files for writing
    outfile_single_handle = open(out_csv_single, 'wb')
    outfile_multi_handle = open(out_csv_multi, 'wb')
    output_single_handle = csv.writer(outfile_single_handle, delimiter="\t")
    output_multi_handle = csv.writer(outfile_multi_handle, delimiter="\t")

    # Write headers
    output_single_handle.writerow(header_part1 + header_part2)
    output_multi_handle.writerow(header_part1 + header_part2 + alternative_headers(header_part2, nhits - 1))
    # Combine all hits for each centrotype into one line
    line = []
    for centrotype_idx in xrange(len(data)):
        i = 0
        for hit in data[centrotype_idx]:
            if i == 0:
                line.extend(hit)
            else:
                line.extend(hit[6:])
            i = i + 1
        # small validation (if this fails, it is a programming error):
        if i > nhits:
            raise Exception('Error: more hits than expected for centrotype_idx ' + str(centrotype_idx))
        output_multi_handle.writerow(line)
        line = []

    # Write one line for each individual hit
    for centrotype_idx in xrange(len(data)):
        for hit in data[centrotype_idx]:
            output_single_handle.writerow(hit)

def alternative_headers(header_part2, nr_alternative_hits):
    '''
    This method iterates over the header names and prepends the string 'ALT#_' to each of them,
    where # is the number of the alternative, according to the number of alternative hits
    to be added to the final csv/tsv
    '''
    result = []
    for i in xrange(nr_alternative_hits):
        for header_name in header_part2:
            result.append("ALT" + str(i + 1) + "_" + header_name)
    return result
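
# Example (follows directly from the loops above):
#   alternative_headers(['Name', 'CAS'], 2) returns
#   ['ALT1_Name', 'ALT1_CAS', 'ALT2_Name', 'ALT2_CAS']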

def main():
    '''
    Combine Output main function
    It will merge the result files from "RankFilter" and "Lookup RI for CAS numbers"
    NB: the caslookup_result_file will typically have fewer lines than
    rankfilter_result_file, so the merge has to take this into account. The final file
    should have the same nr of lines as rankfilter_result_file.
    '''
    rankfilter_result_file = sys.argv[1]
    caslookup_result_file = sys.argv[2]
    output_single_csv = sys.argv[3]
    output_multi_csv = sys.argv[4]

    # Read RankFilter and CasLookup output files
    rankfilter = _process_data(rankfilter_result_file)
    caslookup = _process_data(caslookup_result_file)
    merged, nhits = _merge_data(rankfilter, caslookup)
    _save_data(merged, nhits, output_single_csv, output_multi_csv)


if __name__ == '__main__':
    main()
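
# Example invocation (the file names below are hypothetical; in practice the four arguments are
# supplied by the Galaxy tool wrapping this script):
#   python combine_output.py rankfilter_output.tsv caslookup_output.tsv \
#       combined_single.tsv combined_multi.tsv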