comparison: operation_filter.py @ 2:d491589307e7 (draft)
planemo upload for repository https://github.com/galaxyproject/tools-devteam/tree/master/tool_collections/gops/concat commit a1517c9d22029095120643bbe2c8fa53754dd2b7
author    devteam
date      Wed, 11 Nov 2015 12:47:51 -0500
parents   8aa939ace6ba
children  32e1c8dac438
The revision is a cleanup pass: it drops the unused os, galaxy.eggs and galaxy.jobs imports along with the commented-out exec_before_process block, switches the "!= None" and "== True" comparisons to identity checks, discards the unused startCol/endCol/chromCol/strandCol bindings in the metadata probe, replaces the Python 2-only "except Exception, exc:" syntax, and iterates over out_data.values() instead of unpacking (name, data) pairs.

--- operation_filter.py (revision 1:855580142a12)
+++ operation_filter.py (revision 2:d491589307e7)
@@ -1,20 +1,12 @@
 # runs after the job (and after the default post-filter)
-import os
-from galaxy import eggs
-from galaxy import jobs
 from galaxy.tools.parameters import DataToolParameter
 
 from galaxy.jobs.handler import JOB_ERROR
 
 # Older py compatibility
 try:
     set()
 except:
     from sets import Set as set
 
-#def exec_before_process(app, inp_data, out_data, param_dict, tool=None):
-#    """Sets the name of the data"""
-#    dbkeys = sets.Set( [data.dbkey for data in inp_data.values() ] )
-#    if len(dbkeys) != 1:
-#        raise Exception, '<p><font color="yellow">Both Queries must be from the same genome build</font></p>'
 
@@ -21,37 +13,36 @@
 def validate_input( trans, error_map, param_values, page_param_map ):
     dbkeys = set()
     data_param_names = set()
     data_params = 0
     for name, param in page_param_map.iteritems():
         if isinstance( param, DataToolParameter ):
             # for each dataset parameter
-            if param_values.get(name, None) != None:
+            if param_values.get(name, None) is not None:
                 dbkeys.add( param_values[name].dbkey )
                 data_params += 1
                 # check meta data
                 try:
                     param = param_values[name]
                     if isinstance( param.datatype, trans.app.datatypes_registry.get_datatype_by_extension( 'gff' ).__class__ ):
                         # TODO: currently cannot validate GFF inputs b/c they are not derived from interval.
                         pass
                     else: # Validate interval datatype.
-                        startCol = int( param.metadata.startCol )
-                        endCol = int( param.metadata.endCol )
-                        chromCol = int( param.metadata.chromCol )
+                        int( param.metadata.startCol )
+                        int( param.metadata.endCol )
+                        int( param.metadata.chromCol )
                         if param.metadata.strandCol is not None:
-                            strandCol = int ( param.metadata.strandCol )
-                        else:
-                            strandCol = 0
+                            int( param.metadata.strandCol )
                 except:
                     error_msg = "The attributes of this dataset are not properly set. " + \
                         "Click the pencil icon in the history item to set the chrom, start, end and strand columns."
                     error_map[name] = error_msg
             data_param_names.add( name )
     if len( dbkeys ) > 1:
         for name in data_param_names:
             error_map[name] = "All datasets must belong to same genomic build, " \
                 "this dataset is linked to build '%s'" % param_values[name].dbkey
     if data_params != len(data_param_names):
         for name in data_param_names:
             error_map[name] = "A dataset of the appropriate type is required"
 
+
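The validate_input hook above is the tool's pre-run form validation: it collects each selected dataset's genome build (dbkey) and, if the inputs span more than one build, attaches the same error to every data input. A minimal, runnable sketch of just that dbkey check; FakeDataset and check_same_build are illustrative stand-ins, not Galaxy classes:

# Illustrative stand-ins only; not Galaxy classes.
class FakeDataset(object):
    def __init__(self, dbkey):
        self.dbkey = dbkey


def check_same_build(param_values):
    # Mirrors the dbkey consistency check in validate_input: if the
    # selected datasets span more than one genome build, every input
    # gets the same error message attached.
    error_map = {}
    dbkeys = set(d.dbkey for d in param_values.values())
    if len(dbkeys) > 1:
        for name, d in param_values.items():
            error_map[name] = ("All datasets must belong to same genomic build, "
                               "this dataset is linked to build '%s'" % d.dbkey)
    return error_map


print(check_same_build({'input1': FakeDataset('hg19'),
                        'input2': FakeDataset('mm9')}))
# Both inputs are flagged; with matching builds the map comes back empty.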
@@ -58,18 +49,13 @@
 # Commented out by INS, 5/30/2007. What is the PURPOSE of this?
 def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     """Verify the output data after each run"""
-    items = out_data.items()
-
-    for name, data in items:
+    for data in out_data.values():
         try:
             if stderr and len( stderr ) > 0:
                 raise Exception( stderr )
 
-        except Exception, exc:
+        except Exception:
             data.blurb = JOB_ERROR
             data.state = JOB_ERROR
-
-## def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
-##     pass
 
 
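The rewritten exec_after_process treats any text on stderr as a failed run: the raise inside the loop is caught immediately and every output dataset's blurb and state are stamped with JOB_ERROR. A rough, self-contained sketch of the same pattern; the stand-in output class is mine, and the assumption that JOB_ERROR resolves to the string 'error' is a guess for illustration, not taken from galaxy.jobs.handler:

JOB_ERROR = 'error'  # assumed value, for this sketch only


class FakeOutput(object):
    def __init__(self):
        self.blurb = 'done'
        self.state = 'ok'


def mark_outputs_on_stderr(out_data, stderr):
    # Same shape as exec_after_process: any stderr text marks every
    # output dataset as errored.
    for data in out_data.values():
        try:
            if stderr and len(stderr) > 0:
                raise Exception(stderr)
        except Exception:
            data.blurb = JOB_ERROR
            data.state = JOB_ERROR


outputs = {'output': FakeOutput()}
mark_outputs_on_stderr(outputs, 'some error text')
print(outputs['output'].state)  # -> 'error'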
@@ -76,15 +62,14 @@
 def exec_after_merge(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     exec_after_process(
         app, inp_data, out_data, param_dict, tool=tool, stdout=stdout, stderr=stderr)
 
     # strip strand column if clusters were merged
-    items = out_data.items()
-    for name, data in items:
-        if param_dict['returntype'] == True:
+    for data in out_data.values():
+        if param_dict['returntype'] is True:
             data.metadata.chromCol = 1
             data.metadata.startCol = 2
             data.metadata.endCol = 3
         # merge always clobbers strand
         data.metadata.strandCol = None
 
 
@@ -91,9 +76,8 @@
 def exec_after_cluster(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     exec_after_process(
         app, inp_data, out_data, param_dict, tool=tool, stdout=stdout, stderr=stderr)
 
     # strip strand column if clusters were merged
     if param_dict["returntype"] == '1':
-        items = out_data.items()
-        for name, data in items:
+        for data in out_data.values():
             data.metadata.strandCol = None
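Both post-run hooks end by normalizing the output's column metadata: after a merge the intervals are reduced to chrom/start/end, so those columns are pinned to 1-3 and the strand column is cleared unconditionally. A runnable sketch of that fix-up with stand-in objects, not Galaxy's real metadata classes:

class FakeMetadata(object):
    def __init__(self):
        self.chromCol = 1
        self.startCol = 2
        self.endCol = 3
        self.strandCol = 6


class FakeOutput(object):
    def __init__(self):
        self.metadata = FakeMetadata()


def strip_strand_after_merge(out_data, returntype):
    # Same fix-up as exec_after_merge: merged output is plain
    # chrom/start/end data, and merging always discards strand.
    for data in out_data.values():
        if returntype is True:
            data.metadata.chromCol = 1
            data.metadata.startCol = 2
            data.metadata.endCol = 3
        data.metadata.strandCol = None


outputs = {'output': FakeOutput()}
strip_strand_after_merge(outputs, True)
print(outputs['output'].metadata.strandCol)  # -> None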