#!/usr/bin/env python
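"""
Queue and monitor a series of Galaxy workflows (EnsureSynced,
ValidateAffyMetadata, CoralSNP and FilterAllGenotypedSamples) that genotype a
batch of Affymetrix samples, and then refresh the all_genotyped_samples.vcf
dataset in the All Genotyped Samples data library and in disk storage.
"""
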
import argparse
import os
import shutil
import sys
import threading
import time
from datetime import datetime

from bioblend import galaxy

from six.moves import configparser

parser = argparse.ArgumentParser()
parser.add_argument('--affy_metadata', dest='affy_metadata', help='Input Affymetrix 96 well plate metadata file')
parser.add_argument('--annot', dest='annot', help='Probeset annotation file')
parser.add_argument('--api_key', dest='api_key', help='Current user API key')
parser.add_argument('--calls', dest='calls', help='Apt-probeset genotype calls file')
parser.add_argument('--confidences', dest='confidences', help='Apt-probeset genotype confidences file')
parser.add_argument('--config_file', dest='config_file', help='qgw_config.ini')
parser.add_argument('--dbkey', dest='dbkey', help='Reference genome dbkey')
parser.add_argument('--reference_genome', dest='reference_genome', help='Reference genome')
parser.add_argument('--history_id', dest='history_id', help='Encoded id of current history')
parser.add_argument('--output', dest='output', help='Output dataset')
parser.add_argument('--output_nj_phylogeny_tree', dest='output_nj_phylogeny_tree', help='Flag to plot neighbor-joining phylogeny tree')
parser.add_argument('--report', dest='report', help='Apt-probeset genotype report file')
parser.add_argument('--sample_attributes', dest='sample_attributes', help='Sample attributes tabular file')
parser.add_argument('--snp-posteriors', dest='snp_posteriors', help='Apt-probeset genotype snp-posteriors file')
parser.add_argument('--summary', dest='summary', help='Apt-probeset genotype summary file')
args = parser.parse_args()
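
# Example invocation (values are illustrative only; in practice Galaxy builds
# this command line from the queue_genotype_workflow tool form):
#
# python queue_genotype_workflow.py \
#     --affy_metadata affy_metadata.tabular \
#     --annot probeset_annotation.csv \
#     --api_key /path/to/api_key_file \
#     --calls calls.txt \
#     --confidences confidences.txt \
#     --config_file qgw_config.ini \
#     --dbkey <dbkey> \
#     --reference_genome /path/to/reference.fa \
#     --history_id <encoded history id> \
#     --output output.txt \
#     --output_nj_phylogeny_tree no \
#     --report report.txt \
#     --sample_attributes sample_attributes.tabular \
#     --snp-posteriors snp-posteriors.txt \
#     --summary summary.txt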


def add_library_dataset_to_history(gi, history_id, dataset_id, history_datasets, outputfh):
    # Add a data library dataset to a history.
    outputfh.write('\nImporting dataset into current history.\n')
    new_hda_dict = gi.histories.upload_dataset_from_library(history_id, dataset_id)
    new_hda_name = new_hda_dict['name']
    history_datasets[new_hda_name] = new_hda_dict
    return history_datasets


def copy_history_dataset_to_library(gi, library_id, dataset_id, outputfh):
    # Copy a history dataset to a data library.
    outputfh.write('\nCopying history dataset with id %s to data library with id %s.\n' % (str(dataset_id), str(library_id)))
    new_library_dataset_dict = gi.libraries.copy_from_dataset(library_id, dataset_id)
    return new_library_dataset_dict


def copy_dataset_to_storage(src_path, dst_base_path, dataset_name, outputfh):
    # Copy a dataset to a storage directory on disk. Use the date
    # to name the storage directory to enable storing a file per day
    # (multiple runs per day will overwrite the existing file).
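    # For example (illustrative path and date only), a dataset copied on
    # 2019-08-22 with dst_base_path /storage/coral ends up at
    # /storage/coral/2019_08_22/<dataset_name>.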
    date_str = datetime.now().strftime("%Y_%m_%d")
    dst_dir = os.path.join(dst_base_path, date_str)
    if not os.path.isdir(dst_dir):
        os.makedirs(dst_dir)
    dst_path = os.path.join(dst_dir, dataset_name)
    shutil.copyfile(src_path, dst_path)
    outputfh.write("Copied %s to storage.\n" % dataset_name)


def delete_history_dataset(gi, history_id, dataset_id, outputfh, purge=False):
    # Delete a history dataset.
    outputfh.write("\nDeleting history dataset with id %s.\n" % dataset_id)
    gi.histories.delete_dataset(history_id, dataset_id, purge=purge)


def delete_library_dataset(gi, library_id, dataset_id, outputfh, purged=False):
    # Delete a library dataset.
    outputfh.write("\nDeleting library dataset with id %s.\n" % dataset_id)
    deleted_dataset_dict = gi.libraries.delete_library_dataset(library_id, dataset_id, purged=purged)
    return deleted_dataset_dict


def get_config_settings(config_file, section='defaults'):
    # Return a dictionary consisting of the key / value pairs
    # of the defaults section of config_file.
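    # Expected layout of qgw_config.ini (keys are upper-cased when read below;
    # the values shown here are illustrative only):
    #
    # [defaults]
    # admin_api_key = <API key of the Galaxy admin user>
    # galaxy_base_url = http://localhost:8080
    # all_genotyped_samples_dataset_name = all_genotyped_samples.vcf
    # all_genotyped_samples_library_name = All Genotyped Samples
    # all_genotyped_samples_storage_dir = /storage/coral/all_genotyped_samples
    # coralsnp_workflow_name = CoralSNP
    # ensure_synced_workflow_name = EnsureSynced
    # filter_all_genotyped_samples_workflow_name = FilterAllGenotypedSamples
    # validate_affy_metadata_workflow_name = ValidateAffyMetadata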
    d = {}
    config_parser = configparser.ConfigParser()
    config_parser.read(config_file)
    for key, value in config_parser.items(section):
        if section == 'defaults':
            d[key.upper()] = value
        else:
            d[key] = value
    return d


def get_data_library_dict(gi, name, outputfh):
    # Use the Galaxy API to get the data library named name.
    outputfh.write("\nSearching for data library named %s.\n" % name)
    # The following is not correctly filtering out deleted libraries.
    data_lib_dicts = gi.libraries.get_libraries(library_id=None, name=name, deleted=False)
    for data_lib_dict in data_lib_dicts:
        if data_lib_dict['name'] == name and data_lib_dict['deleted'] not in [True, 'true', 'True']:
            outputfh.write("Found data library named %s.\n" % name)
            outputfh.write("%s\n" % str(data_lib_dict))
            return data_lib_dict
    return None


def get_history_status(gi, history_id):
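    # Return the history status dict from bioblend, which includes a
    # 'state_details' dict mapping each dataset state (e.g. 'new', 'queued',
    # 'running', 'error', 'ok', 'paused') to the number of datasets in that
    # state. The polling loops below rely on those counts.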
    return gi.histories.get_status(history_id)


def get_history_dataset_id_by_name(gi, history_id, dataset_name, outputfh):
    # Use the Galaxy API to get the bcftools merge dataset id
    # from the current history.
    outputfh.write("\nSearching for history dataset named %s.\n" % str(dataset_name))
    history_dataset_dicts = get_history_datasets(gi, history_id)
    for name, hd_dict in list(history_dataset_dicts.items()):
        name = name.lower()
        if name.startswith(dataset_name.lower()):
            outputfh.write("Found dataset named %s.\n" % str(dataset_name))
            return hd_dict['id']
    return None


def get_history_datasets(gi, history_id):
    history_datasets = {}
    history_dict = gi.histories.show_history(history_id, contents=True, deleted='false', details=None)
    for contents_dict in history_dict:
        if contents_dict['history_content_type'] == 'dataset':
            dataset_name = contents_dict['name']
            # Don't include the "Queue genotype workflow" dataset.
            if dataset_name.startswith("Queue genotype workflow"):
                continue
            history_datasets[dataset_name] = contents_dict
    return history_datasets


def get_library_dataset_file_path(gi, library_id, dataset_id, outputfh):
    dataset_dict = gi.libraries.show_dataset(library_id, dataset_id)
    outputfh.write("\nReturning file path of library dataset.\n")
    return dataset_dict.get('file_name', None)


def get_library_dataset_id_by_name(gi, data_lib_id, dataset_name, outputfh):
    # Use the Galaxy API to get the all_genotyped_samples.vcf dataset id.
    # We're assuming it is in the root folder.
    outputfh.write("\nSearching for library dataset named %s.\n" % str(dataset_name))
    lib_item_dicts = gi.libraries.show_library(data_lib_id, contents=True)
    for lib_item_dict in lib_item_dicts:
        if lib_item_dict['type'] == 'file':
            ldda_name = lib_item_dict['name'].lstrip('/').lower()
            if ldda_name.startswith(dataset_name.lower()):
                outputfh.write("Found dataset named %s.\n" % str(ldda_name))
                return lib_item_dict['id']
    return None


def get_value_from_config(config_defaults, value):
    return config_defaults.get(value, None)


def get_workflow(gi, name, outputfh, galaxy_base_url=None, api_key=None):
    outputfh.write("\nSearching for workflow named %s\n" % name)
    workflow_info_dicts = gi.workflows.get_workflows(name=name, published=True)
    if len(workflow_info_dicts) == 0:
        return None, None
    wf_info_dict = workflow_info_dicts[0]
    workflow_id = wf_info_dict['id']
    # Get the complete workflow.
    workflow_dict = gi.workflows.show_workflow(workflow_id)
    outputfh.write("Found workflow named %s.\n" % name)
    return workflow_id, workflow_dict


def get_workflow_input_datasets(gi, history_datasets, workflow_name, workflow_dict, outputfh):
    # Map the history datasets to the input datasets for the workflow.
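    # The returned dict maps each input step index to a dataset source in the
    # form bioblend's invoke_workflow() expects for its inputs argument, e.g.
    # {'0': {'src': 'hda', 'id': '<encoded dataset id>'}}.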
    workflow_inputs = {}
    outputfh.write("\nMapping datasets from history to workflow %s.\n" % workflow_name)
    steps_dict = workflow_dict.get('steps', None)
    if steps_dict is not None:
        for step_index, step_dict in list(steps_dict.items()):
            # Dicts that define dataset inputs for a workflow
            # look like this.
            # "0": {
            #     "tool_id": null,
            #     "tool_version": null,
            #     "id": 0,
            #     "input_steps": {},
            #     "tool_inputs": {},
            #     "type": "data_input",
            #     "annotation": null
            # },
            tool_id = step_dict.get('tool_id', None)
            tool_type = step_dict.get('type', None)
            # This requires the workflow input dataset annotation to be a
            # string (e.g., report) that enables it to be appropriately
            # matched to a dataset (e.g., axiongt1_report.txt).
            # 1. affy_metadata.tabular - must have the word "metadata" in
            #    the file name.
            # 2. sample_attributes.tabular - must have the word "attributes"
            #    in the file name.
            # 3. probeset_annotation.csv - must have the word "annotation" in
            #    the file name.
            # 4. <summary file>.txt - must have the word "summary" in the
            #    file name.
            # 5. <snp-posteriors file>.txt - must have the word
            #    "snp-posteriors" in the file name.
            # 6. <report file>.txt - must have the word "report" in the
            #    file name.
            # 7. <confidences file>.txt - must have the word "confidences"
            #    in the file name.
            # 8. <calls file>.txt - must have the word "calls" in the
            #    file name.
            # 9. all_genotyped_samples.vcf - must have "all_genotyped_samples"
            #    in the file name.
            annotation = step_dict.get('annotation', None)
            if tool_id is None and tool_type == 'data_input' and annotation is not None:
                annotation_check = annotation.lower()
                # inputs is a list and workflow input datasets
                # have no inputs.
                for input_hda_name, input_hda_dict in list(history_datasets.items()):
                    input_hda_name_check = input_hda_name.lower()
                    if input_hda_name_check.find(annotation_check) >= 0:
                        workflow_inputs[step_index] = {'src': 'hda', 'id': input_hda_dict['id']}
                        outputfh.write(" - Mapped dataset %s from history to workflow input dataset with annotation %s.\n" % (input_hda_name, annotation))
                        break
    return workflow_inputs


def start_workflow(gi, workflow_id, workflow_name, inputs, params, history_id, outputfh):
    outputfh.write("\nExecuting workflow %s.\n" % workflow_name)
    workflow_invocation_dict = gi.workflows.invoke_workflow(workflow_id, inputs=inputs, params=params, history_id=history_id)
    outputfh.write("Response from executing workflow %s:\n" % workflow_name)
    outputfh.write("%s\n" % str(workflow_invocation_dict))


def rename_library_dataset(gi, dataset_id, name, outputfh):
    outputfh.write("\nRenaming library dataset with id %s to be named %s.\n" % (str(dataset_id), str(name)))
    library_dataset_dict = gi.libraries.update_library_dataset(dataset_id, name=name)
    return library_dataset_dict


def update_workflow_params(workflow_dict, dbkey, output_nj_phylogeny_tree, outputfh):
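    # Build a params dict for bioblend's invoke_workflow(), keyed by workflow
    # step id, e.g. {'4': {'output_nj_phylogeny_tree': 'yes'}} (the step id
    # shown is illustrative). Returns None if no tool parameters need to be
    # changed from the values stored in the workflow.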
    parameter_updates = None
    name = workflow_dict['name']
    outputfh.write("\nChecking for tool parameter updates for workflow %s using dbkey %s.\n" % (name, dbkey))
    step_dicts = workflow_dict.get('steps', None)
    for step_id, step_dict in list(step_dicts.items()):
        tool_id = step_dict['tool_id']
        if tool_id is None:
            continue
        # Handle reference_source entries
        if tool_id.find('affy2vcf') > 0:
            tool_inputs_dict = step_dict['tool_inputs']
            # The queue_genotype_workflow tool provides a selection of only
            # a locally cached reference genome (not a history item), so dbkey
            # will always refer to a locally cached genome.
            # The affy2vcf tool allows the user to select either a locally
            # cached reference genome or a history item, but the workflow is
            # defined to use a locally cached reference genome by default.
            reference_genome_source_cond_dict = tool_inputs_dict['reference_genome_source_cond']
            # The value of reference_genome_source_cond_dict['reference_genome_source']
            # will always be 'cached'.
            workflow_db_key = reference_genome_source_cond_dict['locally_cached_item']
            if dbkey != workflow_db_key:
                reference_genome_source_cond_dict['locally_cached_item'] = dbkey
                if parameter_updates is None:
                    parameter_updates = {}
                parameter_updates[step_id] = reference_genome_source_cond_dict
                outputfh.write("Updated step id %s with the following entry:\n%s\n" % (step_id, str(reference_genome_source_cond_dict)))
        if tool_id.find('coral_multilocus_genotype') > 0 and output_nj_phylogeny_tree == 'yes':
            # Reset the default value 'no' of output_nj_phylogeny_tree to 'yes'.
            if parameter_updates is None:
                parameter_updates = {}
            output_nj_phylogeny_tree_dict = {'output_nj_phylogeny_tree': 'yes'}
            parameter_updates[step_id] = output_nj_phylogeny_tree_dict
            outputfh.write("Updated step id %s with the following entry:\n%s\n" % (step_id, str(output_nj_phylogeny_tree_dict)))
    return parameter_updates


outputfh = open(args.output, "w")
config_defaults = get_config_settings(args.config_file)
user_api_key = open(args.api_key, 'r').read()
admin_api_key = get_value_from_config(config_defaults, 'ADMIN_API_KEY')
galaxy_base_url = get_value_from_config(config_defaults, 'GALAXY_BASE_URL')
gi = galaxy.GalaxyInstance(url=galaxy_base_url, key=user_api_key)
ags_dataset_name = get_value_from_config(config_defaults, 'ALL_GENOTYPED_SAMPLES_DATASET_NAME')
ags_library_name = get_value_from_config(config_defaults, 'ALL_GENOTYPED_SAMPLES_LIBRARY_NAME')
ags_storage_dir = get_value_from_config(config_defaults, 'ALL_GENOTYPED_SAMPLES_STORAGE_DIR')
coralsnp_workflow_name = get_value_from_config(config_defaults, 'CORALSNP_WORKFLOW_NAME')
es_workflow_name = get_value_from_config(config_defaults, 'ENSURE_SYNCED_WORKFLOW_NAME')
fags_workflow_name = get_value_from_config(config_defaults, 'FILTER_ALL_GENOTYPED_SAMPLES_WORKFLOW_NAME')
vam_workflow_name = get_value_from_config(config_defaults, 'VALIDATE_AFFY_METADATA_WORKFLOW_NAME')

affy_metadata_is_valid = False
datasets_have_queued = False
filtered = False
stag_database_updated = False
synced = False
lock = threading.Lock()
lock.acquire(True)
try:
    # Get the current history datasets. At this point, the
    # history will ideally contain only the datasets to be
    # used as inputs to the 3 workflows, EnsureSynced,
    # ValidateAffyMetadata and CoralSNP.
    history_datasets = get_history_datasets(gi, args.history_id)

    # Get the All Genotyped Samples data library.
    ags_data_library_dict = get_data_library_dict(gi, ags_library_name, outputfh)
    ags_library_id = ags_data_library_dict['id']
    # Get the public all_genotyped_samples.vcf library dataset id.
    ags_ldda_id = get_library_dataset_id_by_name(gi, ags_library_id, ags_dataset_name, outputfh)

    # Import the public all_genotyped_samples dataset from
    # the data library to the current history.
    history_datasets = add_library_dataset_to_history(gi, args.history_id, ags_ldda_id, history_datasets, outputfh)
    outputfh.write("\nSleeping for 5 seconds...\n")
    time.sleep(5)

    # Get the EnsureSynced workflow
    es_workflow_id, es_workflow_dict = get_workflow(gi, es_workflow_name, outputfh)
    outputfh.write("\nEnsureSynced workflow id: %s\n" % str(es_workflow_id))
    # Map the history datasets to the input datasets for
    # the EnsureSynced workflow.
    es_workflow_input_datasets = get_workflow_input_datasets(gi, history_datasets, es_workflow_name, es_workflow_dict, outputfh)
    # Start the EnsureSynced workflow.
    start_workflow(gi, es_workflow_id, es_workflow_name, es_workflow_input_datasets, None, args.history_id, outputfh)
    outputfh.write("\nSleeping for 15 seconds...\n")
    time.sleep(15)
    # Poll the history datasets, checking the statuses, and wait until
    # the workflow is finished. The workflow itself simply schedules
    # all of the jobs, so it cannot be checked for a state.
    while True:
        history_status_dict = get_history_status(gi, args.history_id)
        sd_dict = history_status_dict['state_details']
        outputfh.write("\nsd_dict: %s\n" % str(sd_dict))
        # The queue_genotype_workflow tool will continue to be in a
        # "running" state while inside this polling loop, so we know that
        # the workflow has completed if only 1 dataset has this state.
        if sd_dict['running'] <= 1:
            if sd_dict['error'] == 0:
                # The all_genotyped_samples.vcf file is
                # in sync with the stag database.
                synced = True
            break
        outputfh.write("\nSleeping for 5 seconds...\n")
        time.sleep(5)

    if synced:
        # Get the ValidateAffyMetadata workflow.
        vam_workflow_id, vam_workflow_dict = get_workflow(gi, vam_workflow_name, outputfh)
        outputfh.write("\nValidateAffyMetadata workflow id: %s\n" % str(vam_workflow_id))
        # Map the history datasets to the input datasets for
        # the ValidateAffyMetadata workflow.
        vam_workflow_input_datasets = get_workflow_input_datasets(gi, history_datasets, vam_workflow_name, vam_workflow_dict, outputfh)
        # Start the ValidateAffyMetadata workflow.
        start_workflow(gi, vam_workflow_id, vam_workflow_name, vam_workflow_input_datasets, None, args.history_id, outputfh)
        outputfh.write("\nSleeping for 15 seconds...\n")
        time.sleep(15)
        # Poll the history datasets, checking the statuses, and wait until
        # the workflow is finished.
        while True:
            history_status_dict = get_history_status(gi, args.history_id)
            sd_dict = history_status_dict['state_details']
            outputfh.write("\nsd_dict: %s\n" % str(sd_dict))
            # The queue_genotype_workflow tool will continue to be in a
            # "running" state while inside this polling loop, so we know that
            # the workflow has completed if only 1 dataset has this state.
            if sd_dict['running'] <= 1:
                if sd_dict['error'] == 0:
                    # The metadata is valid.
                    affy_metadata_is_valid = True
                break
            outputfh.write("\nSleeping for 5 seconds...\n")
            time.sleep(5)
    else:
        outputfh.write("\nProcessing ended in error...\n")
        outputfh.close()
        lock.release()
        sys.exit(1)

    if affy_metadata_is_valid:
        # Get the CoralSNP workflow.
        coralsnp_workflow_id, coralsnp_workflow_dict = get_workflow(gi, coralsnp_workflow_name, outputfh)
        outputfh.write("\nCoralSNP workflow id: %s\n" % str(coralsnp_workflow_id))
        # Map the history datasets to the input datasets for
        # the CoralSNP workflow.
        coralsnp_workflow_input_datasets = get_workflow_input_datasets(gi, history_datasets, coralsnp_workflow_name, coralsnp_workflow_dict, outputfh)
        outputfh.write("\nCoralSNP workflow input datasets: %s\n" % str(coralsnp_workflow_input_datasets))
        # Get the CoralSNP workflow params that could be updated.
        coralsnp_params = update_workflow_params(coralsnp_workflow_dict, args.dbkey, args.output_nj_phylogeny_tree, outputfh)
        outputfh.write("\nCoralSNP params: %s\n" % str(coralsnp_params))
        # Start the CoralSNP workflow.
        start_workflow(gi, coralsnp_workflow_id, coralsnp_workflow_name, coralsnp_workflow_input_datasets, coralsnp_params, args.history_id, outputfh)
        outputfh.write("\nSleeping for 15 seconds...\n")
        time.sleep(15)
        # Poll the history datasets, checking the statuses, and wait until
        # the workflow is finished. The workflow itself simply schedules
        # all of the jobs, so it cannot be checked for a state.
        while True:
            history_status_dict = get_history_status(gi, args.history_id)
            sd_dict = history_status_dict['state_details']
            outputfh.write("\ndatasets_have_queued: %s\n" % str(datasets_have_queued))
            outputfh.write("\nsd_dict: %s\n" % str(sd_dict))
            # The queue_genotype_workflow tool will continue to be in a
            # "running" state while inside this polling loop, so we know that
            # the workflow has completed if no datasets are in the "new" or
            # "queued" state and there is only 1 dataset in the "running"
            # state. We cannot filter on datasets in the "paused" state
            # because any datasets downstream from one in an "error" state
            # will automatically be given a "paused" state. Of course, we'll
            # always break if any datasets are in the "error" state. At
            # least one dataset must have reached the "queued" state before
            # the workflow is complete.
            if not datasets_have_queued:
                if sd_dict['queued'] > 0:
                    datasets_have_queued = True
            if sd_dict['error'] != 0:
                break
            if datasets_have_queued and sd_dict['queued'] == 0 and sd_dict['new'] == 0 and sd_dict['running'] <= 1:
                # The stag database has been updated.
                stag_database_updated = True
                break
            outputfh.write("\nSleeping for 5 seconds...\n")
            time.sleep(5)
        outputfh.write("\nstag_database_updated: %s\n" % str(stag_database_updated))
        if stag_database_updated:
            # Get the id of the "bcftools merge" dataset in the current history.
            bcftools_merge = get_history_dataset_id_by_name(gi, args.history_id, "bcftools merge", outputfh)
            # Get the FilterAllGenotypedSamples workflow
            fags_workflow_id, fags_workflow_dict = get_workflow(gi, fags_workflow_name, outputfh)
            outputfh.write("\nFilterAllGenotypedSamples workflow id: %s\n" % str(fags_workflow_id))
            # Map the history datasets to the input datasets for
            # the FilterAllGenotypedSamples workflow.
            fags_workflow_input_datasets = get_workflow_input_datasets(gi, history_datasets, fags_workflow_name, fags_workflow_dict, outputfh)
            # Start the FilterAllGenotypedSamples workflow.
            start_workflow(gi, fags_workflow_id, fags_workflow_name, fags_workflow_input_datasets, None, args.history_id, outputfh)
            outputfh.write("\nSleeping for 15 seconds...\n")
            time.sleep(15)
            # Poll the history datasets, checking the statuses, and wait until
            # the workflow is finished. The workflow itself simply schedules
            # all of the jobs, so it cannot be checked for a state.
            while True:
                history_status_dict = get_history_status(gi, args.history_id)
                sd_dict = history_status_dict['state_details']
                outputfh.write("\nsd_dict: %s\n" % str(sd_dict))
                # The queue_genotype_workflow tool will continue to be in a
                # "running" state while inside this polling loop, so we know that
                # the workflow has completed if only 1 dataset has this state.
                if sd_dict['running'] <= 1:
                    if sd_dict['error'] == 0:
                        # The all_genotyped_samples.vcf file is filtered.
                        filtered = True
                    break
                outputfh.write("\nSleeping for 5 seconds...\n")
                time.sleep(5)
            outputfh.write("\nfiltered: %s\n" % str(filtered))
            if filtered:
                # Get the id of the "bcftools view" dataset in the current history.
                bcftools_view = get_history_dataset_id_by_name(gi, args.history_id, "bcftools view", outputfh)
                # Create a new dataset in the All Genotyped Samples data library by
                # importing the "bcftools view" dataset from the current history.
                # We'll do this as the Galaxy admin user.
                admin_gi = galaxy.GalaxyInstance(url=galaxy_base_url, key=admin_api_key)
                new_ags_dataset_dict = copy_history_dataset_to_library(admin_gi, ags_library_id, bcftools_view, outputfh)
                outputfh.write("\nnew_ags_dataset_dict: %s\n" % str(new_ags_dataset_dict))
                # Rename the ldda to be all_genotyped_samples.vcf.
                new_ags_ldda_id = new_ags_dataset_dict['id']
                outputfh.write("\nnew_ags_ldda_id: %s\n" % str(new_ags_ldda_id))
                renamed_ags_dataset_dict = rename_library_dataset(admin_gi, new_ags_ldda_id, ags_dataset_name, outputfh)
                outputfh.write("\nrenamed_ags_dataset_dict: %s\n" % str(renamed_ags_dataset_dict))
                # Get the full path of the all_genotyped_samples.vcf library dataset.
                ags_ldda_file_path = get_library_dataset_file_path(gi, ags_library_id, ags_ldda_id, outputfh)
                outputfh.write("\nags_ldda_file_path: %s\n" % str(ags_ldda_file_path))
                # Copy the all_genotyped_samples.vcf dataset to storage. We
                # will only keep a single copy of this file since this tool
                # will end in an error before the CoralSNP workflow is started
                # if the all_genotyped_samples.vcf file is not sync'd with the
                # stag database.
                copy_dataset_to_storage(ags_ldda_file_path, ags_storage_dir, ags_dataset_name, outputfh)
                outputfh.write("\nCopied ags_ldda_file_path: %s to ags_storage_dir %s\n" % (str(ags_ldda_file_path), str(ags_storage_dir)))
                # Delete the original all_genotyped_samples library dataset.
                deleted_dataset_dict = delete_library_dataset(admin_gi, ags_library_id, ags_ldda_id, outputfh)
                outputfh.write("\ndeleted_dataset_dict: %s\n" % str(deleted_dataset_dict))
                # To save disk space, delete the all_genotyped_samples hda
                # in the current history to enable later purging by an admin.
                ags_hda_id = get_history_dataset_id_by_name(gi, args.history_id, "all_genotyped_samples", outputfh)
                outputfh.write("\nags_hda_id: %s\n" % str(ags_hda_id))
                delete_history_dataset(gi, args.history_id, ags_hda_id, outputfh)
            else:
                outputfh.write("\nProcessing ended in error...\n")
                outputfh.close()
                lock.release()
                sys.exit(1)
        else:
            outputfh.write("\nProcessing ended in error...\n")
            outputfh.close()
            lock.release()
            sys.exit(1)
except Exception as e:
    outputfh.write("Exception preparing or executing one of the workflows:\n%s\n" % str(e))
    outputfh.write("\nProcessing ended in error...\n")
    outputfh.close()
    lock.release()
    sys.exit(1)
finally:
    # The error paths above release the lock before exiting, so only
    # release it here if it is still held.
    if lock.locked():
        lock.release()

outputfh.write("\nFinished processing...\n")
outputfh.close()