#!/usr/bin/env python

import json
import optparse
import urllib
import os.path
import os
from operator import itemgetter
import tarfile

__version__ = "1.0.0"
CHUNK_SIZE = 2**20  # 1 MB
VALID_CHARS = '.-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ '


def splitext(path):
    for ext in ['.tar.gz', '.tar.bz2']:
        if path.endswith(ext):
            path, ext = path[:-len(ext)], path[-len(ext):]
            break
    else:
        path, ext = os.path.splitext(path)
    return path, ext[1:]
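# Worked example of the logic above: splitext("reads.tar.gz") returns ("reads", "tar.gz"),
# while splitext("reads.fastq") falls back to os.path.splitext and returns ("reads", "fastq").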


def chunk_write( source_stream, target_stream, source_method = "read", target_method="write" ):
    source_method = getattr( source_stream, source_method )
    target_method = getattr( target_stream, target_method )
    while True:
        chunk = source_method( CHUNK_SIZE )
        if chunk:
            target_method( chunk )
        else:
            break


def deconstruct_multi_filename( multi_filename ):
    keys = [ 'primary', 'id', 'name', 'visible', 'file_type' ]
    return ( dict( zip( keys, multi_filename.split('_') ) ) )


def construct_multi_filename( id, name, file_type ):
    """ Implementation of *Number of Output datasets cannot be determined until tool run* from documentation_.
    .. _documentation: http://wiki.galaxyproject.org/Admin/Tools/Multiple%20Output%20Files
    """
    filename = "%s_%s_%s_%s_%s" % ( 'primary', id, name, 'visible', file_type )
    return filename
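# Naming-convention example (derived from the format string above):
# construct_multi_filename( 42, 'sample', 'wig' ) yields 'primary_42_sample_visible_wig',
# and deconstruct_multi_filename() splits such a name back into its five fields.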


def download_from_query( query_data, target_output_filename ):
    """ Download file from the json data and write it to target_output_filename.
    """
    query_url = query_data.get( 'url' )
    query_file_type = query_data.get( 'extension' )
    query_stream = urllib.urlopen( query_url )
    output_stream = open( target_output_filename, 'wb' )
    chunk_write( query_stream, output_stream )
    query_stream.close()
    output_stream.close()

def store_file_from_archive( file_object, target_output_filename, isString=False ):
    """ Store a file extracted from an archive and organize it into a collection using
    the (collection-name)_(file-name).ext naming structure.
    """
    output_stream = open( target_output_filename, 'wb' )
    #chunk_write( file_object.read(), output_stream )
    if not isString:
        output_stream.write(file_object.read())
    else:
        output_stream.write(file_object)
    output_stream.close()


def download_extra_data( query_ext_data, base_path ):
    """ Download any extra data defined in the JSON.
    NOTE: the "path" value is a relative path to the file on our
    file system. This is slightly dangerous and we should make every effort
    to avoid a malicious absolute path being used to write the file elsewhere
    on the filesystem.
    """
    for ext_data in query_ext_data:
        if not os.path.exists( base_path ):
            os.mkdir( base_path )
        query_stream = urllib.urlopen( ext_data.get( 'url' ) )
        ext_path = ext_data.get( 'path' )
        os.makedirs( os.path.normpath( '/'.join( [ base_path, os.path.dirname( ext_path ) ] ) ) )
        output_stream = open( os.path.normpath( '/'.join( [ base_path, ext_path ] ) ), 'wb' )
        chunk_write( query_stream, output_stream )
        query_stream.close()
        output_stream.close()
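# Layout example (hypothetical base_path): with base_path "dataset_42_files" and a JSON
# entry whose "path" is "rel/path/to/ext_file", the download is written to
# "dataset_42_files/rel/path/to/ext_file".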


def metadata_to_json( dataset_id, metadata, filename, ds_type='dataset', primary=False):
    """ Return line separated JSON """
    meta_dict = dict( type = ds_type,
                      ext = metadata.get( 'extension' ),
                      filename = filename,
                      name = metadata.get( 'name' ),
                      metadata = metadata.get( 'metadata', {} ) )
    if metadata.get( 'extra_data', None ):
        meta_dict[ 'extra_files' ] = '_'.join( [ filename, 'files' ] )
    if primary:
        meta_dict[ 'base_dataset_id' ] = dataset_id
    else:
        meta_dict[ 'dataset_id' ] = dataset_id
    return "%s\n" % json.dumps( meta_dict )
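# Illustrative output line (values borrowed from the schema in __main__; key order may vary):
# {"type": "dataset", "ext": "wig", "name": "encode WigData", "metadata": {"db_key": "hg19"},
#  "filename": "<target path>", "dataset_id": <dataset_id>} followed by a newline.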


def walk_on_archive(target_output_filename, check_ext, archive_name, appdata_path, db_key="?"):
    archive_name = archive_name.replace("_", "-").replace(".", "-")
    with tarfile.open( target_output_filename, check_ext ) as tf:
        for entry in tf:
            if entry.isfile():
                fileobj = tf.extractfile( entry )
                # reserve the underscore for the collection separator
                filename = os.path.basename( entry.name ).replace("_", "-")
                extension = splitext( filename )[1]
                # pattern: (?P<identifier_0>[^_]+)_(?P<identifier_1>[^_]+)_(?P<ext>[^_]+)_(?P<dbkey>[^_]+)
                if (len(extension) > 0):
                    filename = (filename[0:len(filename)-(len(extension)+1)]).replace(".", "-") + "." + extension + "_" + extension
                else:
                    extension = "auto"
                filename_with_collection_prefix = archive_name + "_" + filename + "_" + db_key
                target_entry_output_filename = os.path.join(appdata_path, filename_with_collection_prefix)
                store_file_from_archive( fileobj, target_entry_output_filename )
    return True
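# Worked example of the naming scheme above (hypothetical values): an archive named
# "reads.tar.gz" containing "sample_1.fastq", with db_key "hg19", is extracted into
# appdata_path as "reads-tar-gz_sample-1.fastq_fastq_hg19", matching the pattern comment.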


def download_files_and_write_metadata(query_item, json_params, output_base_path, metadata_parameter_file, primary, appdata_path, options, args):
    """ Main work function that operates on the JSON representation of
    one dataset and its metadata. Returns True.
    """
    dataset_url, output_filename, \
        extra_files_path, file_name, \
        ext, out_data_name, \
        hda_id, dataset_id = set_up_config_values(json_params)
    extension = query_item.get( 'extension' )
    url = query_item.get( 'url' )
    filename = query_item.get( 'name' )

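    # Pick a tarfile read mode from the URL suffix; an empty mode string means the
    # download is treated as a plain dataset rather than an archive to expand into a collection.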
    check_ext = ""
    if ( url.endswith( "gz" ) ):
        check_ext = "r:gz"
    elif ( url.endswith( "bz2" ) ):
        check_ext = "r:bz2"
    elif ( url.endswith( "tar" ) ):
        check_ext = "r:"
    isArchive = bool( check_ext and check_ext.strip() )

    extra_data = query_item.get( 'extra_data', None )
    if primary:
        filename = ''.join( c in VALID_CHARS and c or '-' for c in filename )
        name = construct_multi_filename( hda_id, filename, extension )
        target_output_filename = os.path.normpath( '/'.join( [ output_base_path, name ] ) )
        if isArchive is False:
            metadata_parameter_file.write( metadata_to_json( dataset_id, query_item,
                                                             target_output_filename,
                                                             ds_type='new_primary_dataset',
                                                             primary=primary) )
    else:
        target_output_filename = output_filename
        if isArchive is False:
            metadata_parameter_file.write( metadata_to_json( dataset_id, query_item,
                                                             target_output_filename,
                                                             ds_type='dataset',
                                                             primary=primary) )

    if isArchive is False:
        download_from_query( query_item, target_output_filename )
    else:
        target_output_path = os.path.join(appdata_path, filename)
        download_from_query( query_item, target_output_path )
    if extra_data:
        extra_files_path = ''.join( [ target_output_filename, 'files' ] )
        download_extra_data( extra_data, extra_files_path )

    # The following code handles archives and decompresses them into a collection.
    if ( isArchive ):
        db_key = "?"
        archive_metadata = query_item.get( 'metadata', None )
        if archive_metadata is not None:
            try:
                db_key = archive_metadata.get( 'db_key' )
            except Exception:
                pass
        walk_on_archive(target_output_path, check_ext, filename, appdata_path, db_key)

    return True


def set_up_config_values(json_params):
    """ Parse json_params file and return a tuple of necessary configuration
    values.
    """
    datasource_params = json_params.get( 'param_dict' )
    dataset_url = datasource_params.get( 'URL' )
    output_filename = datasource_params.get( 'output1', None )
    output_data = json_params.get( 'output_data' )
    extra_files_path, file_name, ext, out_data_name, hda_id, dataset_id = \
        itemgetter('extra_files_path', 'file_name', 'ext', 'out_data_name', 'hda_id', 'dataset_id')(output_data[0])
    return (dataset_url, output_filename,
            extra_files_path, file_name,
            ext, out_data_name,
            hda_id, dataset_id)
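# Expected json_params shape (a sketch inferred from the lookups above): a dict with
# 'param_dict' (holding 'URL' and, optionally, 'output1') and 'output_data', a list whose
# first element carries 'extra_files_path', 'file_name', 'ext', 'out_data_name',
# 'hda_id' and 'dataset_id'.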


def download_from_json_data( options, args ):
    """ Parse the returned JSON data and download files. Write metadata
    to flat JSON file.
    """
    output_base_path = options.path
    appdata_path = options.appdata
    if not os.path.exists(appdata_path):
        os.makedirs(appdata_path)

    # read tool job configuration file and parse parameters we need
    json_params = json.loads( open( options.json_param_file, 'r' ).read() )

    dataset_url, output_filename, \
        extra_files_path, file_name, \
        ext, out_data_name, \
        hda_id, dataset_id = set_up_config_values(json_params)
    # line separated JSON file to contain all dataset metadata
    metadata_parameter_file = open( json_params['job_config']['TOOL_PROVIDED_JOB_METADATA_FILE'], 'wb' )

    # get JSON response from data source
    # TODO: make sure response is not enormous
    query_params = json.loads(urllib.urlopen( dataset_url ).read())
    # download and write files
    #primary = False
    primary = True
    # query_item, hda_id, output_base_path, dataset_id
    for query_item in query_params:
        if isinstance( query_item, list ):
            # TODO: do something with the nested list as a collection
            for query_subitem in query_item:
                primary = download_files_and_write_metadata(query_subitem, json_params, output_base_path,
                                                            metadata_parameter_file, primary, appdata_path, options, args)

        elif isinstance( query_item, dict ):
            primary = download_files_and_write_metadata(query_item, json_params, output_base_path,
                                                        metadata_parameter_file, primary, appdata_path, options, args)
    metadata_parameter_file.close()

def __main__():
    """ Read the JSON return from a data source. Parse each line and request
    the data, download to "newfilepath", and write metadata.

    Schema
    ------

        [ {"url":"http://url_of_file",
           "name":"encode WigData",
           "extension":"wig",
           "metadata":{"db_key":"hg19"},
           "extra_data":[ {"url":"http://url_of_ext_file",
                           "path":"rel/path/to/ext_file"}
                        ]
          }
        ]

    """
    # Parse the command line options
    usage = "Usage: json_data_source_mod.py max_size --json_param_file filename [options]"
    parser = optparse.OptionParser(usage = usage)
    parser.add_option("-j", "--json_param_file", type="string",
                      action="store", dest="json_param_file", help="json schema return data")
    parser.add_option("-p", "--path", type="string",
                      action="store", dest="path", help="new file path")
    parser.add_option("-a", "--appdata", type="string",
                      action="store", dest="appdata", help="appdata folder name")
    parser.add_option("-v", "--version", action="store_true", dest="version",
                      default=False, help="display version and exit")

    (options, args) = parser.parse_args()
    if options.version:
        print __version__
    else:
        download_from_json_data( options, args )


if __name__ == "__main__": __main__()
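# Example invocation (hypothetical file names and paths), using the options defined in __main__():
#   python json_data_source_mod.py -j galaxy_params.json -p /path/to/output -a /path/to/output/appdata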