# HG changeset patch
# User devteam
# Date 1447264058 18000
# Node ID e0a23ab32d7fa1a35112133fae2cd5cf0fdf0681
# Parent 2221cf5f59aa04530e7e652683e2f4527779c417
planemo upload for repository https://github.com/galaxyproject/tools-devteam/tree/master/tool_collections/gops/complement commit a1517c9d22029095120643bbe2c8fa53754dd2b7

diff -r 2221cf5f59aa -r e0a23ab32d7f gops_complement.py
--- a/gops_complement.py	Mon Apr 14 12:34:29 2014 -0400
+++ b/gops_complement.py	Wed Nov 11 12:47:38 2015 -0500
@@ -8,27 +8,27 @@
 -a, --all: Complement all chromosomes (Genome-wide complement)
 """
 
-import sys, traceback, fileinput
-from warnings import warn
-from bx.intervals import *
-from bx.intervals.io import *
+import sys
+import fileinput
+from bx.intervals.io import GenomicInterval, GenomicIntervalReader, NiceReaderWrapper
 from bx.intervals.operations.complement import complement
 from bx.intervals.operations.subtract import subtract
 from bx.cookbook import doc_optparse
-from galaxy.tools.util.galaxyops import *
+from bx.tabular.io import ParseError
+from galaxy.tools.util.galaxyops import fail, parse_cols_arg, skipped
 
 assert sys.version_info[:2] >= ( 2, 4 )
 
+
 def main():
     allchroms = False
-    upstream_pad = 0
-    downstream_pad = 0
 
     options, args = doc_optparse.parse( __doc__ )
     try:
         chr_col_1, start_col_1, end_col_1, strand_col_1 = parse_cols_arg( options.cols1 )
         lengths = options.lengths
-        if options.all: allchroms = True
+        if options.all:
+            allchroms = True
         in_fname, out_fname = args
     except:
         doc_optparse.exception()
@@ -45,7 +45,7 @@
     # dbfile is used to determine the length of each chromosome. The lengths
     # are added to the lens dict and passed copmlement operation code in bx.
     dbfile = fileinput.FileInput( lengths )
-    
+
     if dbfile:
         if not allchroms:
             try:
@@ -60,7 +60,7 @@
                 for line in dbfile:
                     fields = line.split("\t")
                     end = int(fields[1])
-                    chroms.append("\t".join([fields[0],"0",str(end)]))
+                    chroms.append("\t".join([fields[0], "0", str(end)]))
             except:
                 pass
 
diff -r 2221cf5f59aa -r e0a23ab32d7f operation_filter.py
--- a/operation_filter.py	Mon Apr 14 12:34:29 2014 -0400
+++ b/operation_filter.py	Wed Nov 11 12:47:38 2015 -0500
@@ -1,5 +1,4 @@
 # runs after the job (and after the default post-filter)
-import os
 from galaxy.tools.parameters import DataToolParameter
 
 from galaxy.jobs.handler import JOB_ERROR
@@ -10,11 +9,6 @@
 except:
     from sets import Set as set
 
-#def exec_before_process(app, inp_data, out_data, param_dict, tool=None):
-#    """Sets the name of the data"""
-#    dbkeys = sets.Set( [data.dbkey for data in inp_data.values() ] )
-#    if len(dbkeys) != 1:
-#        raise Exception, '<p><font color="yellow">Both Queries must be from the same genome build</font></p>'
 
 def validate_input( trans, error_map, param_values, page_param_map ):
     dbkeys = set()
@@ -23,7 +17,7 @@
     for name, param in page_param_map.iteritems():
         if isinstance( param, DataToolParameter ):
             # for each dataset parameter
-            if param_values.get(name, None) != None:
+            if param_values.get(name, None) is not None:
                 dbkeys.add( param_values[name].dbkey )
             data_params += 1
             # check meta data
@@ -32,17 +26,15 @@
                 if isinstance( param.datatype, trans.app.datatypes_registry.get_datatype_by_extension( 'gff' ).__class__ ):
                     # TODO: currently cannot validate GFF inputs b/c they are not derived from interval.
                     pass
-                else: # Validate interval datatype.
-                    startCol = int( param.metadata.startCol )
-                    endCol = int( param.metadata.endCol )
-                    chromCol = int( param.metadata.chromCol )
+                else:  # Validate interval datatype.
+                    int( param.metadata.startCol )
+                    int( param.metadata.endCol )
+                    int( param.metadata.chromCol )
                     if param.metadata.strandCol is not None:
-                        strandCol = int ( param.metadata.strandCol )
-                    else:
-                        strandCol = 0
+                        int( param.metadata.strandCol )
             except:
                 error_msg = "The attributes of this dataset are not properly set. " + \
-                "Click the pencil icon in the history item to set the chrom, start, end and strand columns."
+                    "Click the pencil icon in the history item to set the chrom, start, end and strand columns."
                 error_map[name] = error_msg
         data_param_names.add( name )
     if len( dbkeys ) > 1:
@@ -53,38 +45,33 @@
         for name in data_param_names:
             error_map[name] = "A dataset of the appropriate type is required"
 
+
 # Commented out by INS, 5/30/2007.  What is the PURPOSE of this?
 def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     """Verify the output data after each run"""
-    items = out_data.items()
-
-    for name, data in items:
+    for data in out_data.values():
         try:
             if stderr and len( stderr ) > 0:
                 raise Exception( stderr )
-        except Exception, exc:
+        except Exception:
             data.blurb = JOB_ERROR
             data.state = JOB_ERROR
 
-## def exec_after_process(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
-##     pass
-
 
 def exec_after_merge(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     exec_after_process(
         app, inp_data, out_data, param_dict, tool=tool, stdout=stdout, stderr=stderr)
 
     # strip strand column if clusters were merged
-    items = out_data.items()
-    for name, data in items:
-        if param_dict['returntype'] == True:
+    for data in out_data.values():
+        if param_dict['returntype'] is True:
             data.metadata.chromCol = 1
             data.metadata.startCol = 2
             data.metadata.endCol = 3
         # merge always clobbers strand
         data.metadata.strandCol = None
-    
+
 
 def exec_after_cluster(app, inp_data, out_data, param_dict, tool=None, stdout=None, stderr=None):
     exec_after_process(
@@ -92,6 +79,5 @@
 
     # strip strand column if clusters were merged
     if param_dict["returntype"] == '1':
-        items = out_data.items()
-        for name, data in items:
+        for data in out_data.values():
             data.metadata.strandCol = None
 
diff -r 2221cf5f59aa -r e0a23ab32d7f tool_dependencies.xml
--- a/tool_dependencies.xml	Mon Apr 14 12:34:29 2014 -0400
+++ b/tool_dependencies.xml	Wed Nov 11 12:47:38 2015 -0500
@@ -1,9 +1,9 @@
-
+
-
+
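
Reviewer note (not part of the patch): the gops_complement.py hunks replace the old wildcard imports with the explicit bx-python names the tool actually uses. For anyone reviewing without the full file in front of them, below is a minimal sketch of how those names are typically exercised together; the input file names ("a.bed", "hg18.len") and column indices are hypothetical, chosen only to illustrate the BED convention.

    import fileinput

    from bx.intervals.io import GenomicInterval, NiceReaderWrapper
    from bx.intervals.operations.complement import complement

    # Chromosome lengths keyed by chromosome name; complement() needs these
    # to know where each chromosome ends. "hg18.len" stands in for a
    # hypothetical two-column "<chrom>\t<length>" file.
    lens = {}
    for line in fileinput.FileInput("hg18.len"):
        fields = line.rstrip("\n").split("\t")
        lens[fields[0]] = int(fields[1])

    # NiceReaderWrapper yields GenomicIntervals and keeps a count of
    # malformed lines it skipped, which gops tools report via skipped().
    g1 = NiceReaderWrapper(fileinput.FileInput("a.bed"),
                           chrom_col=0, start_col=1, end_col=2, strand_col=5,
                           fix_strand=True)

    # complement() generates the regions of each chromosome NOT covered
    # by the intervals in g1.
    for interval in complement(g1, lens):
        if type(interval) is GenomicInterval:
            print("\t".join(interval.fields))

Beyond style, the explicit imports make failures earlier and clearer: a missing or renamed bx-python symbol now raises ImportError at startup instead of a NameError deep inside main().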