Mercurial > repos > fabio > gdcwebapp
comparison json_data_source_mod.py @ 10:c0be9583df97 draft
Uploaded 2017-05-25
author | fabio |
---|---|
date | Thu, 25 May 2017 17:58:15 -0400 |
parents | 7815152f70c6 |
children | 80593f75d74a |
comparison
equal
deleted
inserted
replaced
9:61989e353d24 | 10:c0be9583df97 |
---|---|
143 download_from_query( query_item, target_output_filename ) | 143 download_from_query( query_item, target_output_filename ) |
144 if extra_data: | 144 if extra_data: |
145 extra_files_path = ''.join( [ target_output_filename, 'files' ] ) | 145 extra_files_path = ''.join( [ target_output_filename, 'files' ] ) |
146 download_extra_data( extra_data, extra_files_path ) | 146 download_extra_data( extra_data, extra_files_path ) |
147 | 147 |
148 """ the following code handles archives and decompress them in a collection """ | |
148 check_ext = "" | 149 check_ext = "" |
149 if ( fname.endswith( "gz" ) ): | 150 if ( fname.endswith( "gz" ) ): |
150 check_ext = "r:gz" | 151 check_ext = "r:gz" |
151 elif ( fname.endswith( "bz2" ) ): | 152 elif ( fname.endswith( "bz2" ) ): |
152 check_ext = "r:bz2" | 153 check_ext = "r:bz2" |
155 if ( bool( check_ext and check_ext.strip() ) ): | 156 if ( bool( check_ext and check_ext.strip() ) ): |
156 with tarfile.open( target_output_filename, check_ext ) as tf: | 157 with tarfile.open( target_output_filename, check_ext ) as tf: |
157 for entry in tf: | 158 for entry in tf: |
158 fileobj = tf.extractfile( entry ) | 159 fileobj = tf.extractfile( entry ) |
159 if entry.isfile(): | 160 if entry.isfile(): |
160 """ | 161 |
161 dataset_url, output_filename, \ | 162 #dataset_url, output_filename, \ |
162 extra_files_path, file_name, \ | 163 # extra_files_path, file_name, \ |
163 ext, out_data_name, \ | 164 # ext, out_data_name, \ |
164 hda_id, dataset_id = set_up_config_values(json_params) | 165 # hda_id, dataset_id = set_up_config_values(json_params) |
165 """ | 166 |
166 filename = os.path.basename( entry.name ) | 167 filename = os.path.basename( entry.name ) |
167 extension = splitext( filename ) | 168 extension = splitext( filename ) |
168 extra_data = None | 169 extra_data = None |
169 #target_output_filename = output_filename | 170 #target_output_filename = output_filename |
170 """ (?P<archive_name>.*)_(?P<file_name>.*)\..* """ | 171 # (?P<archive_name>.*)_(?P<file_name>.*)\..* |
171 filename_with_collection_prefix = query_item.get( 'name' ) + "_" + filename | 172 filename_with_collection_prefix = query_item.get( 'name' ) + "_" + filename |
172 target_output_filename = os.path.join(appdata_path, filename_with_collection_prefix) | 173 target_output_filename = os.path.join(appdata_path, filename_with_collection_prefix) |
173 """ | 174 |
174 metadata_parameter_file.write( metadata_to_json_for_archive_entry( dataset_id, extension, | 175 #metadata_parameter_file.write( metadata_to_json_for_archive_entry( dataset_id, extension, |
175 filename, target_output_filename, | 176 # filename, target_output_filename, |
176 ds_type='dataset', | 177 # ds_type='dataset', |
177 primary=primary) ) | 178 # primary=primary) ) |
178 """ | 179 |
179 store_file_from_archive( fileobj, target_output_filename ) | 180 store_file_from_archive( fileobj, target_output_filename ) |
180 | 181 |
181 return True | 182 return True |
182 | 183 |
183 | 184 |