Mercurial > repos > ieguinoa > data_manager_salmon_index_builder

comparison: data_manager/data_manager_fetch_gff.py @ 0:6cd60ba8a842 (draft)

commit message: Uploaded
author:         ieguinoa
date:           Tue, 14 Aug 2018 11:14:52 -0400
parents:        (none)
children:       (none)

#!/usr/bin/env python
# Dan Blankenberg

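# Galaxy data manager script: fetches a file from UCSC, NCBI, a URL, a history
# dataset, or a directory on the server, writes it as <sequence_id>.gff, and
# registers it in the 'all_gff' tool data table ('representative_gff' when run
# with --type representative). Adapted from the all_fasta genome fetcher, which
# is why many helper names below still say "fasta".
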
import sys
import os
import tempfile
import shutil
import optparse
from ftplib import FTP
import tarfile
import zipfile
import gzip
import bz2
try:
    # For Python 3.0 and later
    from urllib.request import urlopen
    from io import BytesIO as StringIO
    from io import UnsupportedOperation
except ImportError:
    # Fall back to Python 2's urllib2
    from urllib2 import urlopen
    from StringIO import StringIO
    UnsupportedOperation = AttributeError
from json import loads, dumps


CHUNK_SIZE = 2**20  # 1mb

DATA_TABLE_NAME = 'all_gff'

def cleanup_before_exit( tmp_dir ):
    if tmp_dir and os.path.exists( tmp_dir ):
        shutil.rmtree( tmp_dir )


def stop_err(msg):
    sys.stderr.write(msg)
    sys.exit(1)


def get_dbkey_dbname_id_name( params, dbkey_description=None ):
    # dbkey = params['param_dict']['dbkey_source']['dbkey']
    # Read the dbkey first so it can serve as the fallback sequence_id below.
    dbkey = params['param_dict']['dbkey']
    #TODO: ensure sequence_id is unique and does not already appear in location file
    sequence_id = params['param_dict']['sequence_id']
    if not sequence_id:
        sequence_id = dbkey  # uuid.uuid4(): generate and use a uuid instead?

    # if params['param_dict']['dbkey_source']['dbkey_source_selector'] == 'new':
    #     dbkey_name = params['param_dict']['dbkey_source']['dbkey_name']
    #     if not dbkey_name:
    #         dbkey_name = dbkey
    # else:
    #     dbkey_name = None
    dbkey_name = dbkey_description
    sequence_name = params['param_dict']['sequence_name']
    if not sequence_name:
        sequence_name = dbkey_description
        if not sequence_name:
            sequence_name = dbkey
    return dbkey, dbkey_name, sequence_id, sequence_name


def _get_files_in_ftp_path( ftp, path ):
    path_contents = []
    ftp.retrlines( 'MLSD %s' % ( path ), path_contents.append )
    return [ line.split( ';' )[ -1 ].lstrip() for line in path_contents ]


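# The _get_stream_readers_for_* helpers below take an open file handle plus a
# temporary directory and return file-like readers: one per archive member for
# tar and zip, a single-element list for gzip and bz2. get_stream_reader()
# selects the right helper from the file's magic bytes.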
def _get_stream_readers_for_tar( fh, tmp_dir ):
    fasta_tar = tarfile.open( fileobj=fh, mode='r:*' )
    return [x for x in [fasta_tar.extractfile(member) for member in fasta_tar.getmembers()] if x]


def _get_stream_readers_for_zip( fh, tmp_dir ):
    """
    Unpacks all archived files in a zip file.
    Individual files will be concatenated (in _stream_fasta_to_file)
    """
    fasta_zip = zipfile.ZipFile( fh, 'r' )
    rval = []
    for member in fasta_zip.namelist():
        fasta_zip.extract( member, tmp_dir )
        rval.append( open( os.path.join( tmp_dir, member ), 'rb' ) )
    return rval


def _get_stream_readers_for_gzip( fh, tmp_dir ):
    return [ gzip.GzipFile( fileobj=fh, mode='rb' ) ]


def _get_stream_readers_for_bz2( fh, tmp_dir ):
    return [ bz2.BZ2File( fh.name, 'rb' ) ]


def sort_fasta( fasta_filename, sort_method, params ):
    if sort_method is None:
        return
    assert sort_method in SORTING_METHODS, ValueError( "%s is not a valid sorting option." % sort_method )
    return SORTING_METHODS[ sort_method ]( fasta_filename, params )


def _move_and_index_fasta_for_sorting( fasta_filename ):
    unsorted_filename = tempfile.NamedTemporaryFile().name
    shutil.move( fasta_filename, unsorted_filename )
    fasta_offsets = {}
    unsorted_fh = open( unsorted_filename )
    while True:
        offset = unsorted_fh.tell()
        line = unsorted_fh.readline()
        if not line:
            break
        if line.startswith( ">" ):
            line = line.split( None, 1 )[0][1:]
            fasta_offsets[ line ] = offset
    unsorted_fh.close()
    # Build a list (not a lazy map object) of names in file order so callers can
    # compare it against their sorted name lists on both Python 2 and 3.
    current_order = [ name for _, name in sorted( ( offset, name ) for name, offset in fasta_offsets.items() ) ]
    return ( unsorted_filename, fasta_offsets, current_order )


def _write_sorted_fasta( sorted_names, fasta_offsets, sorted_fasta_filename, unsorted_fasta_filename ):
    unsorted_fh = open( unsorted_fasta_filename )
    sorted_fh = open( sorted_fasta_filename, 'wb+' )

    for name in sorted_names:
        offset = fasta_offsets[ name ]
        unsorted_fh.seek( offset )
        sorted_fh.write( unsorted_fh.readline() )
        while True:
            line = unsorted_fh.readline()
            if not line or line.startswith( ">" ):
                break
            sorted_fh.write( line )
    unsorted_fh.close()
    sorted_fh.close()


def _sort_fasta_as_is( fasta_filename, params ):
    return


def _sort_fasta_lexicographical( fasta_filename, params ):
    ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
    sorted_names = sorted( fasta_offsets.keys() )
    if sorted_names == current_order:
        shutil.move( unsorted_filename, fasta_filename )
    else:
        _write_sorted_fasta( sorted_names, fasta_offsets, fasta_filename, unsorted_filename )


def _sort_fasta_gatk( fasta_filename, params ):
    #This method was added by reviewer request.
    ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
    sorted_names = [ str( x ) for x in range( 1, 23 ) ] + [ 'X', 'Y' ]
    #detect if we have chrN, or just N
    has_chr = False
    for chrom in sorted_names:
        if "chr%s" % chrom in current_order:
            has_chr = True
            break

    if has_chr:
        sorted_names = [ "chr%s" % x for x in sorted_names ]
        sorted_names.insert( 0, "chrM" )
    else:
        sorted_names.insert( 0, "MT" )
    sorted_names.extend( [ "%s_random" % x for x in sorted_names ] )

    existing_sorted_names = []
    for name in sorted_names:
        if name in current_order:
            existing_sorted_names.append( name )
    for name in current_order:
        #TODO: confirm that non-canonical names do not need to be sorted specially
        if name not in existing_sorted_names:
            existing_sorted_names.append( name )

    if existing_sorted_names == current_order:
        shutil.move( unsorted_filename, fasta_filename )
    else:
        _write_sorted_fasta( existing_sorted_names, fasta_offsets, fasta_filename, unsorted_filename )


def _sort_fasta_custom( fasta_filename, params ):
    ( unsorted_filename, fasta_offsets, current_order ) = _move_and_index_fasta_for_sorting( fasta_filename )
    sorted_names = []
    for id_repeat in params['param_dict']['sorting']['sequence_identifiers']:
        sorted_names.append( id_repeat[ 'identifier' ] )
    handle_not_listed = params['param_dict']['sorting']['handle_not_listed_selector']
    if handle_not_listed.startswith( 'keep' ):
        add_list = []
        for name in current_order:
            if name not in sorted_names:
                add_list.append( name )
        if add_list:
            if handle_not_listed == 'keep_append':
                sorted_names.extend( add_list )
            else:
                add_list.extend( sorted_names )
                sorted_names = add_list
    if sorted_names == current_order:
        shutil.move( unsorted_filename, fasta_filename )
    else:
        _write_sorted_fasta( sorted_names, fasta_offsets, fasta_filename, unsorted_filename )


def _download_file(start, fh):
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(start)
    tmp.write(fh.read())
    tmp.flush()
    tmp.seek(0)
    return tmp


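# get_stream_reader() sniffs the first CHUNK_SIZE bytes to detect compression;
# handles that cannot seek back to the start (e.g. urlopen responses) are first
# buffered to a temporary file via _download_file() above.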
def get_stream_reader(fh, tmp_dir):
    """
    Check if file is compressed and return correct stream reader.
    If file has to be downloaded, do it now.
    """
    magic_dict = {
        b"\x1f\x8b\x08": _get_stream_readers_for_gzip,
        b"\x42\x5a\x68": _get_stream_readers_for_bz2,
        b"\x50\x4b\x03\x04": _get_stream_readers_for_zip,
    }
    start_of_file = fh.read(CHUNK_SIZE)
    try:
        fh.seek(0)
    except UnsupportedOperation:  # This is if fh has been created by urlopen
        fh = _download_file(start_of_file, fh)
    for k, v in magic_dict.items():
        if start_of_file.startswith(k):
            return v(fh, tmp_dir)
    try:  # Check if file is tar file
        if tarfile.open(fileobj=StringIO(start_of_file)):
            return _get_stream_readers_for_tar(fh, tmp_dir)
    except tarfile.ReadError:
        pass
    return fh


def _get_ucsc_download_address(params, dbkey):
    """
    Check if we can find the correct file for the supplied dbkey on UCSC's FTP server
    """
    UCSC_FTP_SERVER = 'hgdownload.cse.ucsc.edu'
    UCSC_DOWNLOAD_PATH = '/goldenPath/%s/bigZips/'
    COMPRESSED_EXTENSIONS = ['.tar.gz', '.tgz', '.tar.bz2', '.zip', '.fa.gz', '.fa.bz2']

    email = params['param_dict']['__user_email__']
    if not email:
        email = 'anonymous@example.com'

    ucsc_dbkey = params['param_dict']['reference_source']['requested_dbkey'] or dbkey
    UCSC_CHROM_FA_FILENAMES = ['%s.chromFa' % ucsc_dbkey, 'chromFa', ucsc_dbkey]

    ftp = FTP(UCSC_FTP_SERVER)
    ftp.login('anonymous', email)

    ucsc_path = UCSC_DOWNLOAD_PATH % ucsc_dbkey
    path_contents = _get_files_in_ftp_path(ftp, ucsc_path)
    ftp.quit()

    for ucsc_chrom_fa_filename in UCSC_CHROM_FA_FILENAMES:
        for ext in COMPRESSED_EXTENSIONS:
            if "%s%s" % (ucsc_chrom_fa_filename, ext) in path_contents:
                ucsc_file_name = "%s%s%s" % (ucsc_path, ucsc_chrom_fa_filename, ext)
                return "ftp://%s%s" % (UCSC_FTP_SERVER, ucsc_file_name)

    raise Exception('Unable to determine filename for UCSC Genome for %s: %s' % (ucsc_dbkey, path_contents))

def add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params):
    for data_table_name, data_table_entry in _stream_fasta_to_file( fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params ):
        if data_table_entry:
            _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name )


def download_from_ucsc( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
    url = _get_ucsc_download_address(params, dbkey)
    fasta_readers = get_stream_reader(urlopen(url), tmp_dir)
    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)


def download_from_ncbi( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
    NCBI_DOWNLOAD_URL = 'http://togows.dbcls.jp/entry/ncbi-nucleotide/%s.fasta'  #FIXME: taken from dave's genome manager...why some japan site?
    requested_identifier = params['param_dict']['reference_source']['requested_identifier']
    url = NCBI_DOWNLOAD_URL % requested_identifier
    fasta_readers = get_stream_reader(urlopen(url), tmp_dir)
    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)


def download_from_url( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
    urls = filter( bool, map( lambda x: x.strip(), params['param_dict']['reference_source']['user_url'].split( '\n' ) ) )
    fasta_readers = [ get_stream_reader(urlopen( url ), tmp_dir) for url in urls ]
    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)


def download_from_history( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
    #TODO: allow multiple FASTA input files
    input_filename = params['param_dict']['reference_source']['input_fasta']
    if isinstance( input_filename, list ):
        fasta_readers = [ get_stream_reader(open(filename, 'rb'), tmp_dir) for filename in input_filename ]
    else:
        # open in binary mode, matching the multi-file branch above
        fasta_readers = get_stream_reader(open(input_filename, 'rb'), tmp_dir)
    add_fasta_to_table(data_manager_dict, fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params)


def copy_from_directory( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir ):
    input_filename = params['param_dict']['reference_source']['fasta_filename']
    create_symlink = params['param_dict']['reference_source']['create_symlink'] == 'create_symlink'
    if create_symlink:
        data_table_entries = _create_symlink( input_filename, target_directory, dbkey, dbkey_name, sequence_id, sequence_name )
    else:
        if isinstance( input_filename, list ):
            fasta_readers = [ get_stream_reader(open(filename, 'rb'), tmp_dir) for filename in input_filename ]
        else:
            fasta_readers = get_stream_reader(open(input_filename, 'rb'), tmp_dir)
        data_table_entries = _stream_fasta_to_file( fasta_readers, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params )
    for data_table_name, data_table_entry in data_table_entries:
        if data_table_entry:
            _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name )


def _add_data_table_entry( data_manager_dict, data_table_entry, data_table_name ):
    data_manager_dict['data_tables'] = data_manager_dict.get( 'data_tables', {} )
    data_manager_dict['data_tables'][data_table_name] = data_manager_dict['data_tables'].get( data_table_name, [] )
    data_manager_dict['data_tables'][data_table_name].append( data_table_entry )
    return data_manager_dict


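# _stream_fasta_to_file writes the (possibly concatenated) input streams verbatim
# to <sequence_id>.gff under the target directory and returns the corresponding
# data table entry (value, dbkey, name, path) for _add_data_table_entry above.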
def _stream_fasta_to_file( fasta_stream, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, params, close_stream=True ):
    # Name retained from the all_fasta data manager; the streams are written
    # verbatim to a .gff file here.
    fasta_base_filename = "%s.gff" % sequence_id
    fasta_filename = os.path.join( target_directory, fasta_base_filename )
    with open( fasta_filename, 'wb+' ) as fasta_writer:

        if isinstance( fasta_stream, list ) and len( fasta_stream ) == 1:
            fasta_stream = fasta_stream[0]

        if isinstance( fasta_stream, list ):
            last_char = None
            for fh in fasta_stream:
                if last_char not in [ None, '\n', '\r', b'\n', b'\r' ]:
                    fasta_writer.write( b'\n' )
                while True:
                    data = fh.read( CHUNK_SIZE )
                    if data:
                        fasta_writer.write( data )
                        last_char = data[-1:]  # slicing keeps bytes on Python 3 (indexing would yield an int)
                    else:
                        break
                if close_stream:
                    fh.close()
        else:
            while True:
                data = fasta_stream.read( CHUNK_SIZE )
                if data:
                    fasta_writer.write( data )
                else:
                    break
            if close_stream:
                fasta_stream.close()

        #sort_fasta( fasta_filename, params['param_dict']['sorting']['sort_selector'], params )

    return [ ( DATA_TABLE_NAME, dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_filename ) ) ]


def compute_fasta_length( fasta_file, out_file, keep_first_word=False ):

    infile = fasta_file
    out = open( out_file, 'w' )

    fasta_title = ''
    seq_len = 0

    first_entry = True

    for line in open( infile ):
        line = line.strip()
        if not line or line.startswith( '#' ):
            continue
        if line[0] == '>':
            if first_entry == False:
                if keep_first_word:
                    fasta_title = fasta_title.split()[0]
                out.write( "%s\t%d\n" % ( fasta_title[ 1: ], seq_len ) )
            else:
                first_entry = False
            fasta_title = line
            seq_len = 0
        else:
            seq_len += len(line)

    # last fasta-entry
    if keep_first_word:
        fasta_title = fasta_title.split()[0]
    out.write( "%s\t%d\n" % ( fasta_title[ 1: ], seq_len ) )
    out.close()


def _create_symlink( input_filename, target_directory, dbkey, dbkey_name, sequence_id, sequence_name ):
    # Note: the symlink keeps the .fa extension inherited from the all_fasta data manager.
    fasta_base_filename = "%s.fa" % sequence_id
    fasta_filename = os.path.join( target_directory, fasta_base_filename )
    os.symlink( input_filename, fasta_filename )
    return [ ( DATA_TABLE_NAME, dict( value=sequence_id, dbkey=dbkey, name=sequence_name, path=fasta_base_filename ) ) ]


REFERENCE_SOURCE_TO_DOWNLOAD = dict( ucsc=download_from_ucsc, ncbi=download_from_ncbi, url=download_from_url, history=download_from_history, directory=copy_from_directory )

SORTING_METHODS = dict( as_is=_sort_fasta_as_is, lexicographical=_sort_fasta_lexicographical, gatk=_sort_fasta_gatk, custom=_sort_fasta_custom )


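# main() is invoked by the Galaxy data manager framework with one positional
# argument: the JSON file describing the job. Illustrative (not exhaustive)
# shape of the fields read below:
#   {
#     "output_data": [ { "extra_files_path": "/path/to/dataset_extra_files" } ],
#     "param_dict": {
#       "dbkey": "...", "sequence_id": "...", "sequence_name": "...",
#       "__user_email__": "...",
#       "reference_source": { "reference_source_selector": "ucsc|ncbi|url|history|directory", "...": "..." }
#     }
#   }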
def main():
    #Parse Command Line
    parser = optparse.OptionParser()
    parser.add_option( '-d', '--dbkey_description', dest='dbkey_description', action='store', type="string", default=None, help='dbkey_description' )
    parser.add_option( '-t', '--type', dest='file_type', action='store', type='string', default=None, help='file_type' )
    (options, args) = parser.parse_args()

    filename = args[0]
    global DATA_TABLE_NAME
    if options.file_type == 'representative':
        DATA_TABLE_NAME = 'representative_gff'
    params = loads( open( filename ).read() )
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir( target_directory )
    data_manager_dict = {}

    dbkey, dbkey_name, sequence_id, sequence_name = get_dbkey_dbname_id_name( params, dbkey_description=options.dbkey_description )

    if dbkey in [ None, '', '?' ]:
        raise Exception( '"%s" is not a valid dbkey. You must specify a valid dbkey.' % ( dbkey ) )

    # Create a tmp_dir, in case a zip file needs to be uncompressed
    tmp_dir = tempfile.mkdtemp()
    #Fetch the GFF
    try:
        REFERENCE_SOURCE_TO_DOWNLOAD[ params['param_dict']['reference_source']['reference_source_selector'] ]( data_manager_dict, params, target_directory, dbkey, dbkey_name, sequence_id, sequence_name, tmp_dir )
    finally:
        cleanup_before_exit(tmp_dir)
    #save info to json file
    open( filename, 'wb' ).write( dumps( data_manager_dict ).encode() )

if __name__ == "__main__":
    main()
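# Illustrative invocation (the data manager tool wrapper supplies the JSON file;
# the exact command line depends on the wrapper XML):
#   python data_manager_fetch_gff.py /path/to/galaxy.json --dbkey_description 'My genome' --type representative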