Mercurial > repos > devteam > complement
comparison operation_filter.py @ 3:e0a23ab32d7f draft
planemo upload for repository https://github.com/galaxyproject/tools-devteam/tree/master/tool_collections/gops/complement commit a1517c9d22029095120643bbe2c8fa53754dd2b7
author | devteam |
---|---|
date | Wed, 11 Nov 2015 12:47:38 -0500 |
parents | d958d5a0d1e8 |
children | 38c8bb402872 |
comparison
equal
deleted
inserted
replaced
2:2221cf5f59aa | 3:e0a23ab32d7f |
---|---|
1 # runs after the job (and after the default post-filter) | 1 # runs after the job (and after the default post-filter) |
2 import os | |
3 from galaxy.tools.parameters import DataToolParameter | 2 from galaxy.tools.parameters import DataToolParameter |
4 | 3 |
5 from galaxy.jobs.handler import JOB_ERROR | 4 from galaxy.jobs.handler import JOB_ERROR |
6 | 5 |
# Older py compatibility: ``set`` became a builtin in Python 2.4; on older
# interpreters referencing it raises NameError, so fall back to the
# (deprecated) ``sets`` module. Catch only NameError — a bare ``except:``
# would also hide unrelated failures such as KeyboardInterrupt.
try:
    set()
except NameError:
    from sets import Set as set
13 #def exec_before_process(app, inp_data, out_data, param_dict, tool=None): | |
14 # """Sets the name of the data""" | |
15 # dbkeys = sets.Set( [data.dbkey for data in inp_data.values() ] ) | |
16 # if len(dbkeys) != 1: | |
17 # raise Exception, '<p><font color="yellow">Both Queries must be from the same genome build</font></p>' | |
18 | 12 |
def validate_input( trans, error_map, param_values, page_param_map ):
    """Validate the tool's dataset inputs before the job is created.

    Records a message in ``error_map`` (keyed by parameter name) when a
    dataset input has unset/non-numeric interval metadata columns, when the
    selected datasets come from different genomic builds, or when a dataset
    parameter has no dataset selected.
    """
    dbkeys = set()
    data_param_names = set()
    data_params = 0
    # .items() instead of py2-only .iteritems(): equivalent here (the dict is
    # not mutated during iteration) and works on Python 2 and 3.
    for name, param in page_param_map.items():
        if isinstance( param, DataToolParameter ):
            # for each dataset parameter
            if param_values.get( name, None ) is not None:
                dbkeys.add( param_values[name].dbkey )
                data_params += 1
                # check meta data
                try:
                    # use a distinct name; reusing ``param`` would shadow the
                    # DataToolParameter from the loop
                    dataset = param_values[name]
                    if isinstance( dataset.datatype, trans.app.datatypes_registry.get_datatype_by_extension( 'gff' ).__class__ ):
                        # TODO: currently cannot validate GFF inputs b/c they are not derived from interval.
                        pass
                    else:  # Validate interval datatype.
                        # int(...) raises TypeError/ValueError when a column
                        # has not been set on the dataset; values are unused.
                        int( dataset.metadata.startCol )
                        int( dataset.metadata.endCol )
                        int( dataset.metadata.chromCol )
                        if dataset.metadata.strandCol is not None:
                            int( dataset.metadata.strandCol )
                except Exception:
                    # narrowed from a bare except: so KeyboardInterrupt /
                    # SystemExit are no longer swallowed
                    error_msg = "The attributes of this dataset are not properly set. " + \
                        "Click the pencil icon in the history item to set the chrom, start, end and strand columns."
                    error_map[name] = error_msg
            data_param_names.add( name )
    if len( dbkeys ) > 1:
        for name in data_param_names:
            error_map[name] = "All datasets must belong to same genomic build, " \
                "this dataset is linked to build '%s'" % param_values[name].dbkey
    if data_params != len( data_param_names ):
        for name in data_param_names:
            error_map[name] = "A dataset of the appropriate type is required"
55 | 47 |
48 | |
def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
    """Verify the output data after each run.

    If the job wrote anything to stderr, mark every output dataset as
    being in an error state (blurb and state set to JOB_ERROR).
    """
    # stderr is the same for every output dataset, so evaluate the condition
    # once up front instead of raising and catching an exception per item.
    if stderr and len( stderr ) > 0:
        for data in out_data.values():
            data.blurb = JOB_ERROR
            data.state = JOB_ERROR
72 | 60 |
73 | 61 |
def exec_after_merge(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
    """Post-job hook for the merge tool.

    Runs the standard error check, then repairs interval metadata on the
    outputs: the strand column is always cleared (merging discards strand),
    and the chrom/start/end columns are reset when ``returntype`` is True.
    """
    exec_after_process(
        app, inp_data, out_data, param_dict, tool=tool, stdout=stdout, stderr=stderr)

    # strip strand column if clusters were merged
    # ``returntype`` is loop-invariant, so test it once instead of per output
    if param_dict['returntype'] is True:
        for data in out_data.values():
            data.metadata.chromCol = 1
            data.metadata.startCol = 2
            data.metadata.endCol = 3
    # merge always clobbers strand, regardless of returntype
    for data in out_data.values():
        data.metadata.strandCol = None
88 | 75 |
def exec_after_cluster(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
    """Post-job hook for the cluster tool.

    Performs the standard post-run error check, then clears the strand
    metadata column on every output when clusters were merged
    (``returntype == '1'``), since merging discards strand information.
    """
    exec_after_process(
        app, inp_data, out_data, param_dict, tool=tool, stdout=stdout, stderr=stderr)

    # strip strand column if clusters were merged
    clusters_merged = param_dict["returntype"] == '1'
    if clusters_merged:
        for dataset in out_data.values():
            dataset.metadata.strandCol = None