comparison fetch_fasta_from_NCBI.py @ 4:c667d0ee39f5 draft
planemo upload for repository https://github.com/ARTbio/tools-artbio/tree/master/tools/fetch_fasta_from_ncbi commit ca3070e85c370b914ffa0562afe12b363e05aea4
author   | artbio
date     | Wed, 29 Nov 2017 17:38:52 -0500
parents  | 8be88084f89c
children | 706fe8139955
comparison
3:8be88084f89c (parent) | 4:c667d0ee39f5 (this revision)
53 self.retmax_esearch = 100000 | 53 self.retmax_esearch = 100000 |
54 self.retmax_efetch = 500 | 54 self.retmax_efetch = 500 |
55 self.count = 0 | 55 self.count = 0 |
56 self.webenv = "" | 56 self.webenv = "" |
57 self.query_key = "" | 57 self.query_key = "" |
| 58 if options.get_uids: |
| 59 self.get_uids = True |
| 60 else: |
| 61 self.get_uids = False |
| 62 if options.iuds_file: |
| 63 with open(options.iuds_file, 'r') as f: |
| 64 self.ids.extend(f.readline().split(' ')) |
58 | 65 |
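The inserted constructor lines above add two input modes: a --get_uids flag and a UID-list file whose first line is split on single spaces. A minimal standalone sketch of that file parsing, assuming the one-line, space-separated layout that f.readline().split(' ') implies (the function name and the empty-token filtering are this sketch's additions, not the tool's):

    def parse_uid_file(path):
        # Read only the first line, as the tool does, and split on spaces.
        # Stripping whitespace and dropping empty tokens is defensive and
        # not part of the tool itself.
        with open(path, 'r') as f:
            return [uid.strip() for uid in f.readline().split(' ')
                    if uid.strip()]

    # A file whose first line is "1234 5678 91011" yields
    # ['1234', '5678', '91011'].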
59 def dry_run(self): | 66 def dry_run(self): |
60 self.get_count_value() | 67 self.get_count_value() |
61 | 68 |
62 def retrieve(self): | 69 def retrieve(self): |
63 """ | 70 """ |
64 Retrieve the fasta sequences corresponding to the query | 71 Retrieve the fasta sequences corresponding to the query |
65 """ | 72 """ |
66 self.get_count_value() | 73 if len(self.ids) == 0: |
| 74 self.get_count_value() |
| 75 else: |
| 76 self.count = len(self.ids) |
67 # If no UIDs are found exit script | 77 # If no UIDs are found exit script |
68 if self.count > 0: | 78 if self.count > 0: |
69 self.get_uids_list() | 79 if len(self.ids) == 0: |
70 try: | 80 self.get_uids_list() |
71 self.get_sequences() | 81 if not self.get_uids: |
72 except QueryException as e: | 82 try: |
73 self.logger.error("Exiting script.") | 83 self.get_sequences() |
74 raise e | 84 except QueryException as e: |
| 85 self.logger.error("Exiting script.") |
| 86 raise e |
| 87 else: |
| 88 with open(self.outname, 'w') as f: |
| 89 f.write('\t'.join(self.ids)+'\n') |
75 else: | 90 else: |
76 self.logger.error("No UIDs were found. Exiting script.") | 91 self.logger.error("No UIDs were found. Exiting script.") |
77 raise Exception("") | 92 raise Exception("") |
78 | 93 |
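Condensed, the reworked retrieve() on the right-hand side branches three ways: count via esearch when no UID list was supplied, emit the UID list itself under --get_uids, otherwise fetch sequences. The sketch below restates that flow with the Eutils network calls stubbed out (the stub bodies are placeholders, and the QueryException handling around get_sequences() is elided):

    class EutilsSketch(object):
        """Stand-in illustrating the new retrieve() control flow."""
        def __init__(self, ids, get_uids, outname):
            self.ids = ids            # UIDs from --UID_list, if any
            self.get_uids = get_uids  # --get_uids flag
            self.outname = outname
            self.count = 0

        def get_count_value(self):
            self.count = 0            # esearch would set this

        def get_uids_list(self):
            pass                      # esearch would populate self.ids

        def get_sequences(self):
            pass                      # batched efetch loop

        def retrieve(self):
            if len(self.ids) == 0:
                self.get_count_value()      # no UID list: count via esearch
            else:
                self.count = len(self.ids)  # UID list given: count directly
            if self.count == 0:
                raise Exception("No UIDs were found. Exiting script.")
            if len(self.ids) == 0:
                self.get_uids_list()        # fill self.ids from the query
            if self.get_uids:
                # --get_uids: write the UID list itself, tab-separated
                with open(self.outname, 'w') as f:
                    f.write('\t'.join(self.ids) + '\n')
            else:
                self.get_sequences()        # fetch the FASTA records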
79 def get_count_value(self): | 94 def get_count_value(self): |
138 querylog = response.readlines() | 153 querylog = response.readlines() |
139 response.close() | 154 response.close() |
140 time.sleep(1) | 155 time.sleep(1) |
141 return querylog | 156 return querylog |
142 | 157 |
143 def epost(self, db, ids): | 158 def sanitiser(self, db, fastaseq): |
144 url = self.base + "epost.fcgi" | 159 if(db not in "nuccore protein"): |
145 self.logger.debug("url_epost: %s" % url) | 160 return fastaseq |
146 values = {'db': db, | 161 regex = re.compile(r"[ACDEFGHIKLMNPQRSTVWYBZ]{49,}") |
147 'id': ids} | 162 sane_seqlist = [] |
148 data = urllib.urlencode(values) | 163 seqlist = fastaseq.split("\n\n") |
149 req = urllib2.Request(url, data) | 164 for seq in seqlist[:-1]: |
150 serverResponse = False | 165 fastalines = seq.split("\n") |
151 nb_trials = 0 | 166 if len(fastalines) < 2: |
152 while not serverResponse: | 167 self.logger.info("Empty sequence for %s" % |
153 nb_trials += 1 | 168 ("|".join(fastalines[0].split("|")[:4]))) |
154 try: | 169 self.logger.info("%s download is skipped" % |
155 self.logger.debug("Try number %s for opening and readin URL %s" | 170 ("|".join(fastalines[0].split("|")[:4]))) |
156 % (nb_trials, url+data)) | 171 continue |
157 response = urllib2.urlopen(req) | 172 if db == "nuccore": |
158 querylog = response.readlines() | 173 badnuc = 0 |
159 response.close() | 174 for nucleotide in fastalines[1]: |
160 serverResponse = True | 175 if nucleotide not in "ATGC": |
161 except urllib2.HTTPError as e: | 176 badnuc += 1 |
162 self.logger.info("urlopen error:%s, %s" % (e.code, e.read())) | 177 if float(badnuc)/len(fastalines[1]) > 0.4: |
163 self.logger.info("Retrying in 1 sec") | 178 self.logger.info("%s ambiguous nucleotides in %s\ |
164 serverResponse = False | 179 or download interrupted at this offset\ |
165 time.sleep(1) | 180 | %s" % (float(badnuc)/len(fastalines[1]), |
166 except urllib2.URLError as e: | 181 "|".join(fastalines[0].split("|") |
167 self.logger.info("urlopen error: Failed to reach a server") | 182 [:4]), |
168 self.logger.info("Reason :%s" % (e.reason)) | 183 fastalines[1])) |
169 self.logger.info("Retrying in 1 sec") | 184 self.logger.info("%s download is skipped" % |
170 serverResponse = False | 185 (fastalines[0].split("|")[:4])) |
171 time.sleep(1) | 186 continue |
172 except httplib.IncompleteRead as e: | 187 """ remove spaces and trim the header to 100 chars """ |
173 self.logger.info("IncompleteRead error: %s" % (e.partial)) | 188 fastalines[0] = fastalines[0].replace(" ", "_")[:100] |
174 self.logger.info("Retrying in 1 sec") | 189 cleanseq = "\n".join(fastalines) |
175 serverResponse = False | 190 sane_seqlist.append(cleanseq) |
176 time.sleep(1) | 191 elif db == "protein": |
177 self.logger.debug("query response:") | 192 fastalines[0] = fastalines[0][0:100] |
178 for line in querylog: | 193 fastalines[0] = fastalines[0].replace(" ", "_") |
179 self.logger.debug(line.rstrip()) | 194 fastalines[0] = fastalines[0].replace("[", "_") |
180 if '</QueryKey>' in line: | 195 fastalines[0] = fastalines[0].replace("]", "_") |
181 self.query_key = str(line[line.find('<QueryKey>') + | 196 fastalines[0] = fastalines[0].replace("=", "_") |
182 len('<QueryKey>'):line.find('</QueryKey>') | 197 """ because blast makedb doesn't like it """ |
183 ]) | 198 fastalines[0] = fastalines[0].rstrip("_") |
184 if '</WebEnv>' in line: | 199 fastalines[0] = re.sub(regex, "_", fastalines[0]) |
185 self.webenv = str(line[line.find('<WebEnv>')+len('<WebEnv>'): | 200 cleanseq = "\n".join(fastalines) |
186 line.find('</WebEnv>')]) | 201 sane_seqlist.append(cleanseq) |
187 self.logger.debug("*** epost action ***") | 202 self.logger.info("clean sequences appended: %d" % (len(sane_seqlist))) |
188 self.logger.debug("query_key: %s" % self.query_key) | 203 return "\n".join(sane_seqlist) |
189 self.logger.debug("webenv: %s" % self.webenv) | 204 |
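sanitiser(), relocated above efetch() in this revision but unchanged in substance, skips nuccore records whose first sequence line is more than 40% non-ATGC. That test in isolation, under a hypothetical function name of this sketch's choosing:

    def too_ambiguous(seqline, threshold=0.4):
        # Mirrors the badnuc / len(fastalines[1]) test in sanitiser():
        # count characters outside the unambiguous DNA alphabet.
        bad = sum(1 for nucleotide in seqline if nucleotide not in "ATGC")
        return float(bad) / len(seqline) > threshold

    assert too_ambiguous("ATGCNNNNNN")      # 6/10 ambiguous: record skipped
    assert not too_ambiguous("ATGCATGCNA")  # 1/10 ambiguous: record kept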
190 time.sleep(1) | 205 def efetch(self, db, uid_list): |
191 | |
192 def efetch(self, db, query_key, webenv): | |
193 url = self.base + "efetch.fcgi" | 206 url = self.base + "efetch.fcgi" |
194 self.logger.debug("url_efetch: %s" % url) | 207 self.logger.debug("url_efetch: %s" % url) |
195 values = {'db': db, | 208 values = {'db': db, |
196 'query_key': query_key, | 209 'id': uid_list, |
197 'webenv': webenv, | |
198 'rettype': "fasta", | 210 'rettype': "fasta", |
199 'retmode': "text"} | 211 'retmode': "text"} |
200 data = urllib.urlencode(values) | 212 data = urllib.urlencode(values) |
201 req = urllib2.Request(url, data) | 213 req = urllib2.Request(url, data) |
202 self.logger.debug("data: %s" % str(data)) | 214 self.logger.debug("data: %s" % str(data)) |
249 this batch. Aborting."}) | 261 this batch. Aborting."}) |
250 fasta = self.sanitiser(self.dbname, fasta) | 262 fasta = self.sanitiser(self.dbname, fasta) |
251 time.sleep(0.1) | 263 time.sleep(0.1) |
252 return fasta | 264 return fasta |
253 | 265 |
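The reworked efetch() above POSTs the comma-joined UID list in the id parameter instead of referencing an epost-created query_key/WebEnv session, which is why epost() could be dropped. A self-contained sketch of that request shape, written with Python 3's urllib.request for brevity (the tool itself targets Python 2's urllib2, and its retry and error handling are elided here):

    import urllib.parse
    import urllib.request

    EUTILS = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/"

    def efetch_fasta(db, uid_list):
        # POST keeps long UID lists out of the URL, as in the tool.
        data = urllib.parse.urlencode({'db': db,
                                       'id': ','.join(uid_list),
                                       'rettype': 'fasta',
                                       'retmode': 'text'}).encode('ascii')
        response = urllib.request.urlopen(EUTILS + "efetch.fcgi", data)
        try:
            return response.read().decode('ascii', errors='replace')
        finally:
            response.close()

    # e.g. efetch_fasta("nuccore", ["MN908947"]) returns FASTA text.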
254 def sanitiser(self, db, fastaseq): | |
255 if(db not in "nuccore protein"): | |
256 return fastaseq | |
257 regex = re.compile(r"[ACDEFGHIKLMNPQRSTVWYBZ]{49,}") | |
258 sane_seqlist = [] | |
259 seqlist = fastaseq.split("\n\n") | |
260 for seq in seqlist[:-1]: | |
261 fastalines = seq.split("\n") | |
262 if len(fastalines) < 2: | |
263 self.logger.info("Empty sequence for %s" % | |
264 ("|".join(fastalines[0].split("|")[:4]))) | |
265 self.logger.info("%s download is skipped" % | |
266 ("|".join(fastalines[0].split("|")[:4]))) | |
267 continue | |
268 if db == "nuccore": | |
269 badnuc = 0 | |
270 for nucleotide in fastalines[1]: | |
271 if nucleotide not in "ATGC": | |
272 badnuc += 1 | |
273 if float(badnuc)/len(fastalines[1]) > 0.4: | |
274 self.logger.info("%s ambiguous nucleotides in %s\ | |
275 or download interrupted at this offset\ | |
276 | %s" % (float(badnuc)/len(fastalines[1]), | |
277 "|".join(fastalines[0].split("|") | |
278 [:4]), | |
279 fastalines[1])) | |
280 self.logger.info("%s download is skipped" % | |
281 (fastalines[0].split("|")[:4])) | |
282 continue | |
283 """ remove spaces and trim the header to 100 chars """ | |
284 fastalines[0] = fastalines[0].replace(" ", "_")[:100] | |
285 cleanseq = "\n".join(fastalines) | |
286 sane_seqlist.append(cleanseq) | |
287 elif db == "protein": | |
288 fastalines[0] = fastalines[0][0:100] | |
289 fastalines[0] = fastalines[0].replace(" ", "_") | |
290 fastalines[0] = fastalines[0].replace("[", "_") | |
291 fastalines[0] = fastalines[0].replace("]", "_") | |
292 fastalines[0] = fastalines[0].replace("=", "_") | |
293 """ because blast makedb doesn't like it """ | |
294 fastalines[0] = fastalines[0].rstrip("_") | |
295 fastalines[0] = re.sub(regex, "_", fastalines[0]) | |
296 cleanseq = "\n".join(fastalines) | |
297 sane_seqlist.append(cleanseq) | |
298 self.logger.info("clean sequences appended: %d" % (len(sane_seqlist))) | |
299 return "\n".join(sane_seqlist) | |
300 | |
301 def get_sequences(self): | 266 def get_sequences(self): |
302 """ | 267 batch_size = 200 |
303 Total number of records from the input set to be retrieved, | |
304 up to a maximum of 10,000. Optionally, for a large set the value of | |
305 retstart can be iterated while holding retmax constant, thereby | |
306 downloading the entire set in batches of size retmax. | |
307 http://www.ncbi.nlm.nih.gov/books/NBK25499/#chapter4.EFetch | |
308 """ | |
309 batch_size = self.retmax_efetch | |
310 count = self.count | 268 count = self.count |
311 uids_list = self.ids | 269 uids_list = self.ids |
312 self.logger.info("Batch size for efetch action: %d" % batch_size) | 270 self.logger.info("Batch size for efetch action: %d" % batch_size) |
313 self.logger.info("Number of batches for efetch action: %d" % | 271 self.logger.info("Number of batches for efetch action: %d" % |
314 ((count / batch_size) + 1)) | 272 ((count / batch_size) + 1)) |
315 with open(self.outname, 'w') as out: | 273 with open(self.outname, 'w') as out: |
316 for start in range(0, count, batch_size): | 274 for start in range(0, count, batch_size): |
317 end = min(count, start+batch_size) | 275 end = min(count, start+batch_size) |
318 batch = uids_list[start:end] | 276 batch = uids_list[start:end] |
319 if self.epost(self.dbname, ",".join(batch)) != -1: | 277 self.logger.info("retrieving batch %d" % |
320 mfasta = '' | 278 ((start / batch_size) + 1)) |
321 while not mfasta: | 279 try: |
322 self.logger.info("retrieving batch %d" % | 280 mfasta = self.efetch(self.dbname, ','.join(batch)) |
323 ((start / batch_size) + 1)) | 281 out.write(mfasta + '\n') |
324 try: | 282 except QueryException as e: |
325 mfasta = self.efetch(self.dbname, self.query_key, | 283 self.logger.error("%s" % e.message) |
326 self.webenv) | 284 raise e |
327 out.write(mfasta + '\n') | 285 urllib.urlcleanup() |
328 except QueryException as e: | |
329 self.logger.error("%s" % e.message) | |
330 raise e | |
331 | 286 |
332 | 287 |
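get_sequences() now fetches in fixed batches of 200 UIDs rather than self.retmax_efetch, and calls efetch() directly on each slice. The slice arithmetic in isolation (the helper name is this sketch's, not the tool's):

    def batches(uids, batch_size=200):
        # Yield consecutive slices of at most batch_size UIDs, matching
        # the start/end arithmetic in get_sequences().
        for start in range(0, len(uids), batch_size):
            yield uids[start:start + batch_size]

    # 450 UIDs -> slices of 200, 200 and 50:
    assert [len(b) for b in batches(list(range(450)))] == [200, 200, 50]

Note that the logged batch count, (count / batch_size) + 1 under Python 2 integer division, over-counts by one whenever count is an exact multiple of the batch size.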
333 LOG_FORMAT = '%(asctime)s|%(levelname)-8s|%(message)s' | 288 LOG_FORMAT = '%(asctime)s|%(levelname)-8s|%(message)s' |
334 LOG_DATEFMT = '%Y-%m-%d %H:%M:%S' | 289 LOG_DATEFMT = '%Y-%m-%d %H:%M:%S' |
335 LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] | 290 LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'] |
336 | 291 |
337 | 292 |
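The LOG_FORMAT and LOG_DATEFMT constants are unchanged; for reference, a minimal sketch of wiring them into the standard logging module (the tool's actual logger setup falls outside the hunks shown here):

    import logging
    import sys

    LOG_FORMAT = '%(asctime)s|%(levelname)-8s|%(message)s'
    LOG_DATEFMT = '%Y-%m-%d %H:%M:%S'

    logging.basicConfig(format=LOG_FORMAT, datefmt=LOG_DATEFMT,
                        stream=sys.stderr, level=logging.INFO)
    logging.getLogger(__name__).info("logger configured")
    # -> 2017-11-29 17:38:52|INFO    |logger configured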
338 def command_parse(): | 293 def command_parse(): |
339 parser = argparse.ArgumentParser(description='Retrieve data from NCBI') | 294 parser = argparse.ArgumentParser(description='Retrieve data from NCBI') |
340 parser.add_argument('-i', dest='query_string', help='NCBI Query String', | 295 parser.add_argument('-i', dest='query_string', help='NCBI Query String') |
341 required=True) | 296 parser.add_argument('--UID_list', dest='iuds_file', |
| 297 help='file containing a list of iuds to be fetched') |
342 parser.add_argument('-o', dest='outname', help='output file name') | 298 parser.add_argument('-o', dest='outname', help='output file name') |
343 parser.add_argument('-d', dest='dbname', help='database type') | 299 parser.add_argument('-d', dest='dbname', help='database type') |
344 parser.add_argument('--count', '-c', dest='count_ids', | 300 parser.add_argument('--count', '-c', dest='count_ids', |
345 action='store_true', default=False, | 301 action='store_true', default=False, |
346 help='dry run ouputing only the number of sequences\ | 302 help='dry run ouputing only the number of sequences\ |
347 found') | 303 found') |
| 304 parser.add_argument('--get_uids', '-u', dest='get_uids', default=False, |
| 305 action='store_true', help='prints to the output a list\ |
| 306 of UIDs') |
348 parser.add_argument('-l', '--logfile', help='log file (default=stderr)') | 307 parser.add_argument('-l', '--logfile', help='log file (default=stderr)') |
349 parser.add_argument('--loglevel', choices=LOG_LEVELS, default='INFO', | 308 parser.add_argument('--loglevel', choices=LOG_LEVELS, default='INFO', |
350 help='logging level (default: INFO)') | 309 help='logging level (default: INFO)') |
351 args = parser.parse_args() | 310 args = parser.parse_args() |
| 311 if args.query_string is not None and args.iuds_file is not None: |
| 312 parser.error('Please choose either fetching the -i query or the -u\ |
| 313 list.') |
352 return args | 314 return args |
353 | 315 |
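command_parse() now rejects passing both -i and --UID_list through an explicit parser.error() call after parsing. argparse can express the same constraint declaratively; a sketch of that alternative (this is not what the tool does, and required=True additionally forces one of the two options to be present, which the manual check does not):

    import argparse

    parser = argparse.ArgumentParser(description='Retrieve data from NCBI')
    source = parser.add_mutually_exclusive_group(required=True)
    source.add_argument('-i', dest='query_string', help='NCBI Query String')
    source.add_argument('--UID_list', dest='iuds_file',
                        help='file containing a list of iuds to be fetched')
    args = parser.parse_args()
    # argparse itself now errors out when both -i and --UID_list are given.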
354 | 316 |
355 def __main__(): | 317 def __main__(): |
356 """ main function """ | 318 """ main function """ |
367 E = Eutils(args, logger) | 329 E = Eutils(args, logger) |
368 if args.count_ids: | 330 if args.count_ids: |
369 try: | 331 try: |
370 E.dry_run() | 332 E.dry_run() |
371 except Exception: | 333 except Exception: |
372 sys.exit(1) | 334 sys.exit(-1) |
373 else: | 335 else: |
374 try: | 336 try: |
375 E.retrieve() | 337 E.retrieve() |
376 except Exception: | 338 except Exception: |
377 sys.exit(1) | 339 sys.exit(-1) |
378 | 340 |
379 | 341 |
380 if __name__ == "__main__": | 342 if __name__ == "__main__": |
381 __main__() | 343 __main__() |