#!/usr/bin/env python
"""
#
#------------------------------------------------------------------------------
#                         University of Minnesota
#         Copyright 2015, Regents of the University of Minnesota
#------------------------------------------------------------------------------
# Author:
#
#  James E Johnson
#
#------------------------------------------------------------------------------
"""
|
|
14
|
|
15 import json
|
|
16 import logging
|
|
17 import optparse
|
|
18 from optparse import OptionParser
|
|
19 import os
|
|
20 import sys
|
|
21 import re
|
|
22 import urllib
|
|
23 import urllib2
|
|
24 try:
|
|
25 import xml.etree.cElementTree as ET
|
|
26 except ImportError:
|
|
27 import xml.etree.ElementTree as ET
|
|
28
|
|
def warn_err(msg, exit_code=1):
    """Report *msg* on stderr and, when *exit_code* is truthy, abort with it.

    Passing exit_code=None (or 0) turns the call into a warning only.
    """
    sys.stderr.write(msg)
    if not exit_code:
        return
    sys.exit(exit_code)
|
|
33
|
1
|
# Column layouts for the supported Unipept API responses.
# pept2lca / pept2taxa default response fields:
pept2lca_column_order = ['peptide', 'taxon_rank', 'taxon_id', 'taxon_name']
# Full taxonomic lineage ranks returned with the extra/names options
# ('peptide' is always the leading field):
pept2lca_extra_column_order = [
    'peptide',
    'superkingdom', 'kingdom', 'subkingdom',
    'superphylum', 'phylum', 'subphylum',
    'superclass', 'class', 'subclass', 'infraclass',
    'superorder', 'order', 'suborder', 'infraorder', 'parvorder',
    'superfamily', 'family', 'subfamily',
    'tribe', 'subtribe',
    'genus', 'subgenus',
    'species_group', 'species_subgroup', 'species', 'subspecies',
    'varietas', 'forma',
]
# Default fields followed by the lineage (listing 'peptide' only once):
pept2lca_all_column_order = pept2lca_column_order + pept2lca_extra_column_order[1:]
# pept2prot default response fields:
pept2prot_column_order = ['peptide', 'uniprot_id', 'taxon_id']
# pept2prot fields when the extra option is set:
pept2prot_extra_column_order = pept2prot_column_order + [
    'taxon_name', 'ec_references', 'go_references',
    'refseq_ids', 'refseq_protein_ids', 'insdc_ids', 'insdc_protein_ids',
]
|
|
39
|
|
def __main__():
    """Command-line driver: map peptide sequences to taxa/proteins with the
    Unipept REST API (pept2lca, pept2taxa, or pept2prot).

    Peptides are collected from tabular/FASTA/mzIdentML/pepXML inputs and
    positional arguments, digested into tryptic fragments, submitted to
    Unipept, and the responses are mapped back to the original peptides and
    written as JSON, TSV, and/or CSV.
    """
    version = '1.1'
    # Accepted peptide alphabet (standard residues plus ambiguity codes B, X, Z).
    pep_pat = '^([ABCDEFGHIKLMNPQRSTVWXYZ]+)$'

    def read_tabular(filepath, col):
        """Return peptides from (zero-based) column `col` of a TAB-separated file.

        Blank lines and '#' comment lines are skipped.  An invalid peptide is
        reported through warn_err (fatal only when --strict set invalid_ec).
        """
        peptides = []
        with open(filepath) as fp:
            for i, line in enumerate(fp):
                if line.strip() == '' or line.startswith('#'):
                    continue
                fields = line.rstrip('\n').split('\t')
                peptide = fields[col]
                if not re.match(pep_pat, peptide):
                    # i + 1: report the conventional 1-based line number.
                    warn_err('"%s" is not a peptide (line %d column %d of tabular file: %s)\n' % (peptide, i + 1, col, filepath), exit_code=invalid_ec)
                peptides.append(peptide)
        return peptides

    def get_fasta_entries(fp):
        """Yield (header, sequence) tuples from an open FASTA file handle."""
        name, seq = None, []
        for line in fp:
            line = line.rstrip()
            if line.startswith(">"):
                if name:
                    yield (name, ''.join(seq))
                name, seq = line, []
            else:
                seq.append(line)
        # emit the final record (a FASTA file does not end with a '>')
        if name:
            yield (name, ''.join(seq))

    def read_fasta(filepath):
        """Return the peptide sequences contained in a FASTA file."""
        peptides = []
        with open(filepath) as fp:
            for id, peptide in get_fasta_entries(fp):
                if not re.match(pep_pat, peptide):
                    warn_err('"%s" is not a peptide (id %s of fasta file: %s)\n' % (peptide, id, filepath), exit_code=invalid_ec)
                peptides.append(peptide)
        return peptides

    def read_mzid(fp):
        """Return every PeptideSequence text value from an mzIdentML file."""
        peptides = []
        for event, elem in ET.iterparse(fp):
            if event == 'end':
                # match on the local tag name regardless of XML namespace
                if re.search('PeptideSequence', elem.tag):
                    peptides.append(elem.text)
        return peptides

    def read_pepxml(fp):
        """Return the peptide attribute of every search_hit in a pepXML file."""
        peptides = []
        for event, elem in ET.iterparse(fp):
            if event == 'end':
                if re.search('search_hit', elem.tag):
                    peptides.append(elem.get('peptide'))
        return peptides

    def best_match(peptide, matches):
        """Return a copy of the most taxon-specific match dict, or None.

        With several candidates, lineage ranks are scanned from most to
        least specific and the first match assigned at that rank wins.
        """
        if not matches:
            return None
        elif len(matches) == 1:
            return matches[0].copy()
        else:
            # find the most specific match (peptide is always the first column order field)
            for col in reversed(pept2lca_extra_column_order[1:]):
                col_id = col + "_id" if options.extra else col
                for match in matches:
                    if 'taxon_rank' in match and match['taxon_rank'] == col:
                        return match.copy()
                    if col_id in match and match[col_id]:
                        return match.copy()
        return None

    # Parse Command Line
    parser = optparse.OptionParser()
    # unipept API choice
    parser.add_option('-a', '--api', dest='unipept', default='pept2lca', choices=['pept2lca', 'pept2taxa', 'pept2prot'], help='The unipept application: pept2lca, pept2taxa, or pept2prot')
    # input files
    parser.add_option('-t', '--tabular', dest='tabular', default=None, help='A tabular file that contains a peptide column')
    parser.add_option('-c', '--column', dest='column', type='int', default=0, help='The column (zero-based) in the tabular file that contains peptide sequences')
    parser.add_option('-f', '--fasta', dest='fasta', default=None, help='A fasta file containing peptide sequences')
    parser.add_option('-m', '--mzid', dest='mzid', default=None, help='A mzIdentML file containing peptide sequences')
    parser.add_option('-p', '--pepxml', dest='pepxml', default=None, help='A pepxml file containing peptide sequences')
    # Unipept Flags
    parser.add_option('-e', '--equate_il', dest='equate_il', action='store_true', default=False, help='isoleucine (I) and leucine (L) are equated when matching tryptic peptides to UniProt records')
    parser.add_option('-x', '--extra', dest='extra', action='store_true', default=False, help='return the complete lineage of the taxonomic lowest common ancestor')
    parser.add_option('-n', '--names', dest='names', action='store_true', default=False, help='return the names of all ranks in the lineage of the taxonomic lowest common ancestor')
    # output fields
    parser.add_option('-A', '--allfields', dest='allfields', action='store_true', default=False, help='include fields: taxon_rank,taxon_id,taxon_name csv and tsv outputs')
    # Warn vs Error Flag
    parser.add_option('-S', '--strict', dest='strict', action='store_true', default=False, help='Print error and exit on invalid peptide')
    # output files
    parser.add_option('-J', '--json', dest='json', default=None, help='Output file path for json formatted results')
    parser.add_option('-T', '--tsv', dest='tsv', default=None, help='Output file path for TAB-separated-values (.tsv) formatted results')
    parser.add_option('-C', '--csv', dest='csv', default=None, help='Output file path for Comma-separated-values (.csv) formatted results')
    parser.add_option('-U', '--unmatched', dest='unmatched', default=None, help='Output file path for peptide with no matches')
    # debug
    parser.add_option('-d', '--debug', dest='debug', action='store_true', default=False, help='Turning on debugging')
    parser.add_option('-v', '--version', dest='version', action='store_true', default=False, help='print version and exit')
    (options, args) = parser.parse_args()

    if options.version:
        sys.stdout.write("%s\n" % version)
        sys.exit(0)

    # Exit code for an invalid peptide: fatal (2) in --strict mode, warn-only otherwise.
    invalid_ec = 2 if options.strict else None
    peptides = []
    ## Get peptide sequences
    if options.mzid:
        peptides += read_mzid(options.mzid)
    if options.pepxml:
        peptides += read_pepxml(options.pepxml)
    if options.tabular:
        peptides += read_tabular(options.tabular, options.column)
    if options.fasta:
        peptides += read_fasta(options.fasta)
    if args and len(args) > 0:
        # positional arguments are taken as literal peptide sequences
        for i, peptide in enumerate(args):
            if not re.match(pep_pat, peptide):
                warn_err('"%s" is not a peptide (arg %d)\n' % (peptide, i), exit_code=invalid_ec)
            peptides.append(peptide)
    if len(peptides) < 1:
        warn_err("No peptides input!", exit_code=1)

    # Choose the output column layout for the selected API and flags.
    column_order = pept2lca_column_order
    if options.unipept == 'pept2prot':
        column_order = pept2prot_extra_column_order if options.extra else pept2prot_column_order
    else:
        if options.extra or options.names:
            column_order = pept2lca_all_column_order if options.allfields else pept2lca_extra_column_order
        else:
            column_order = pept2lca_column_order

    ## map to tryptic peptides
    # Cut after K or R except when followed by P (trypsin cleavage rule).
    pepToParts = {p: re.split("\n", re.sub(r'(?<=[RK])(?=[^P])', '\n', p)) for p in peptides}
    partToPeps = {}
    for peptide, parts in pepToParts.items():
        if options.debug:
            sys.stdout.write("peptide: %s\ttryptic: %s\n\n" % (peptide, parts))
        for part in parts:
            if len(part) > 50:
                # Unipept does not accept fragments longer than 50 residues.
                warn_err("peptide: %s tryptic fragment len %d > 50 for %s\n" % (peptide, len(part), part), exit_code=None)
            if 5 <= len(part) <= 50:
                partToPeps.setdefault(part, []).append(peptide)
    trypticPeptides = list(partToPeps.keys())

    ## unipept request
    post_data = []
    if options.equate_il:
        post_data.append(("equate_il", "true"))
    if options.names:
        # names implies extra on the Unipept API
        post_data.append(("extra", "true"))
        post_data.append(("names", "true"))
    elif options.extra:
        post_data.append(("extra", "true"))
    post_data += [('input[]', x) for x in trypticPeptides]
    headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json'}
    url = 'http://api.unipept.ugent.be/api/v1/%s' % options.unipept
    # urllib2 was split into urllib.request / urllib.parse in Python 3;
    # import locally with a fallback so the script runs under either interpreter.
    try:
        from urllib.request import Request, urlopen
        from urllib.parse import urlencode
    except ImportError:
        from urllib2 import Request, urlopen
        from urllib import urlencode
    req = Request(url, headers=headers, data=urlencode(post_data).encode('ascii'))
    unipept_resp = json.loads(urlopen(req).read())

    unmatched_peptides = []
    peptideMatches = []
    if options.debug:
        sys.stdout.write("unipept response: %s\n\n" % str(unipept_resp))
    if options.unipept == 'pept2prot' or options.unipept == 'pept2taxa':
        dupkey = 'uniprot_id' if options.unipept == 'pept2prot' else 'taxon_id'  ## should only keep one of these per input peptide
        ## multiple entries per trypticPeptide for pep2prot or pep2taxa
        mapping = {}
        for match in unipept_resp:
            mapping.setdefault(match['peptide'], []).append(match)
        for peptide in peptides:
            # Get the intersection of matches to the tryptic parts
            keyToMatch = None
            for part in pepToParts[peptide]:
                if part in mapping:
                    temp = {match[dupkey]: match for match in mapping[part]}
                    if keyToMatch:
                        # drop keys not shared by this part's matches
                        dkeys = set(keyToMatch.keys()) - set(temp.keys())
                        for k in dkeys:
                            del keyToMatch[k]
                    else:
                        keyToMatch = temp
            if not keyToMatch:
                unmatched_peptides.append(peptide)
            else:
                for key, match in keyToMatch.items():
                    match['tryptic_peptide'] = match['peptide']
                    match['peptide'] = peptide
                    peptideMatches.append(match)
    else:
        ## should be one response per trypticPeptide for pep2lca
        respMap = {v['peptide']: v for v in unipept_resp}
        ## map resp back to peptides
        for peptide in peptides:
            matches = list()
            for part in pepToParts[peptide]:
                if part in respMap:
                    matches.append(respMap[part])
            match = best_match(peptide, matches)
            if not match:
                # record as unmatched but still emit a placeholder row
                unmatched_peptides.append(peptide)
                longest_tryptic_peptide = sorted(pepToParts[peptide], key=len)[-1]
                match = {'peptide': longest_tryptic_peptide}
            match['tryptic_peptide'] = match['peptide']
            match['peptide'] = peptide
            peptideMatches.append(match)
    resp = peptideMatches
    if options.debug:
        sys.stdout.write("\nmapped response: %s\n\n" % str(resp))

    ## output results
    if not (options.unmatched or options.json or options.tsv or options.csv):
        print(str(resp))
    if options.unmatched:
        with open(options.unmatched, 'w') as outputFile:
            for peptide in peptides:
                if peptide in unmatched_peptides:
                    outputFile.write("%s\n" % peptide)
    if options.json:
        with open(options.json, 'w') as outputFile:
            # json.dumps, not str(): repr() of a dict uses single quotes and
            # is not valid JSON.
            outputFile.write(json.dumps(resp))
    if options.tsv or options.csv:
        # 'pept2lca','pept2taxa','pept2prot'
        found_keys = set()
        results = []
        for i, pdict in enumerate(resp):
            results.append(pdict)
            found_keys |= set(pdict.keys())
        # Resolve each configured column to the key actually present in the
        # responses (plain, '_id', or '_name' variant depending on flags).
        column_names = []
        column_keys = []
        for col in column_order:
            if col in found_keys:
                column_names.append(col)
                column_keys.append(col)
            elif options.extra or options.names:
                col_id = col + '_id'
                col_name = col + '_name'
                if options.extra:
                    if col_id in found_keys:
                        column_names.append(col_id)
                        column_keys.append(col_id)
                if options.names:
                    if col_name in found_keys:
                        column_names.append(col)
                        column_keys.append(col_name)
            else:
                if col + '_name' in found_keys:
                    column_names.append(col)
                    column_keys.append(col + '_name')
                elif col + '_id' in found_keys:
                    column_names.append(col)
                    column_keys.append(col + '_id')
        # De-duplicate rows while preserving their first-seen order.
        taxa = []
        for i, pdict in enumerate(results):
            vals = [str(pdict[x]) if x in pdict and pdict[x] else '' for x in column_keys]
            if vals not in taxa:
                taxa.append(vals)
        if options.tsv:
            with open(options.tsv, 'w') as outputFile:
                outputFile.write("#%s\n" % '\t'.join(column_names))
                for vals in taxa:
                    outputFile.write("%s\n" % '\t'.join(vals))
        if options.csv:
            with open(options.csv, 'w') as outputFile:
                outputFile.write("%s\n" % ','.join(column_names))
                for vals in taxa:
                    outputFile.write("%s\n" % ','.join(['"%s"' % (v if v else '') for v in vals]))
|
|
297
|
|
# Run the driver only when executed as a script, not on import.
if __name__ == "__main__":
    __main__()
|