changeset 0:315246810bfa draft
Uploaded 20180404
author    fabio
date      Tue, 03 Apr 2018 20:27:39 -0400
parents   (none)
children  9a00e3b8c3c0
files     ._.shed.yml ._query.py ._query.xml .shed.yml query.py query.xml
diffstat  6 files changed, 305 insertions(+), 0 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/.shed.yml	Tue Apr 03 20:27:39 2018 -0400
@@ -0,0 +1,21 @@
+name: btman
+owner: iuc
+categories:
+  - Data Source
+  - Web Services
+description: BloomTree Manager
+long_description: |
+  A fast querying tool to identify all publicly available sequenced
+  samples which express a transcript of interest
+remote_repository_url: https://github.com/fabio-cumbo/bloomtree-manager
+homepage_url: https://github.com/fabio-cumbo/bloomtree-manager
+type: unrestricted
+auto_tool_repositories:
+  name_template: "{{ tool_id }}"
+  descriptor_template: "Wrapper for BloomTree Manager: {{ tool_name }}."
+suite:
+  name: "btman_suite"
+  description: "A suite of Galaxy tools designed to work with the BloomTree Manager."
+  long_description: |
+    A fast querying tool to identify all publicly available sequenced
+    samples which express a transcript of interest
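The .shed.yml above is the publication metadata used by planemo and the Galaxy Tool Shed: auto_tool_repositories is meant to generate one Tool Shed repository per tool, named and described through name_template and descriptor_template, while suite groups them under btman_suite. The snippet below is only an illustrative sketch of how those templated fields would expand for the single tool defined in query.xml further down (id btman_query, name "BloomTree Manager - Query"); the real substitution is performed by planemo at lint/publish time, not by this code.

# Illustrative sketch only: expand the templated .shed.yml fields for the tool
# declared in query.xml (id "btman_query", name "BloomTree Manager - Query").
# planemo performs the real substitution when publishing; this is not its code path.
import yaml  # PyYAML

with open(".shed.yml") as handle:
    shed = yaml.safe_load(handle)

tool_id, tool_name = "btman_query", "BloomTree Manager - Query"
auto = shed["auto_tool_repositories"]

print(auto["name_template"].replace("{{ tool_id }}", tool_id))            # btman_query
print(auto["descriptor_template"].replace("{{ tool_name }}", tool_name))  # Wrapper for BloomTree Manager: BloomTree Manager - Query.
print(shed["suite"]["name"])                                              # btman_suite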
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/query.py	Tue Apr 03 20:27:39 2018 -0400
@@ -0,0 +1,196 @@
+#!/usr/bin/env python
+
+# https://github.com/ross/requests-futures
+# http://docs.python-requests.org/en/master/user/quickstart/#more-complicated-post-requests
+
+import sys, os, uuid, optparse, requests, json, time
+#from requests_futures.sessions import FuturesSession
+
+#### NN14 ####
+SERVICE_URL = "http://nn14.galaxyproject.org:8080/";
+#service_url = "http://127.0.0.1:8082/";
+QUERY_URL = SERVICE_URL+"tree/1/query";
+STATUS_URL = SERVICE_URL+"status/<query_id>";
+##############
+# query delay in seconds
+QUERY_DELAY = 30;
+##############
+
+__version__ = "1.0.0";
+VALID_CHARS = '.-()[]0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ '
+# in the case of collections, exitcodes equal to 0 and 1 are not considered errors
+ERR_EXIT_CODE = 2;
+OK_EXIT_CODE = 0;
+
+def raiseException( exitcode, message, output_dir_path, errorfilename ):
+    errorfilepath = os.path.join(output_dir_path, errorfilename+"_txt");
+    with open(errorfilepath, 'w') as out:
+        out.write(message);
+    sys.exit(exitcode);
+
+def query_request( options, args, payload ):
+    output_dir_path = options.outputdir;
+    # add additional parameters to the payload
+    #payload["tree_id"] = str(options.treeid);
+    payload["search_mode"] = str(options.search);
+    payload["exact_algorithm"] = int(options.exact);
+    payload["search_threshold"] = float(options.sthreshold);
+    # set the content type to application/json
+    headers = {'Content-type': 'application/json'};
+
+    # create a session
+    session = requests.Session();
+    # make a synchronous post request to the query route
+    req = session.post(QUERY_URL, headers=headers, json=payload);
+    resp_code = req.status_code;
+    #print(str(req.content)+"\n\n");
+    if resp_code == requests.codes.ok:
+        resp_content = str(req.content);
+        # convert out to json
+        json_content = json.loads(resp_content);
+        # retrieve query id
+        query_id = json_content['query_id'];
+        query_processed = False;
+        # results json content
+        json_status_content = None;
+        while query_processed is False:
+            # create a new session
+            session = requests.Session();
+            # make a synchronous get request to the status route
+            status_query_url = STATUS_URL.replace("<query_id>", query_id);
+            status_req = session.get(status_query_url);
+            status_resp_content = str(status_req.content);
+            #print(status_resp_content+"\n\n");
+            # convert out to json
+            json_status_content = json.loads(status_resp_content);
+            # take a look at the state
+            # state attribute is always available
+            if json_status_content['state'] == 'SUCCESS':
+                query_processed = True;
+                break;
+            elif json_status_content['state'] in ['FAILURE', 'REVOKED']:
+                return raiseException( ERR_EXIT_CODE, "Query ID: "+str(query_id)+"\nQuery status: "+str(json_status_content['state']), output_dir_path, str(options.errorfile) );
+            else:
+                time.sleep(QUERY_DELAY); # in seconds
+
+        out_file_format = "tabular";
+        for block in json_status_content['results']:
+            seq_id = block['sequence_id'];
+            # put response block in the output collection
+            output_file_path = os.path.join(output_dir_path, seq_id + "_" + out_file_format);
+            accessions_list = "";
+            hits_block = block['hits'];
+            for hit in hits_block:
+                if type(hit) is dict: # sabutan
+                    accessions_list = accessions_list + hit['accession_number'] + "\t" + hit['score'] + "\n";
+                else: # all-some
+                    accessions_list = accessions_list + str(hit) + "\n";
+            with open(output_file_path, 'w') as out:
+                out.write(accessions_list.strip());
+        return sys.exit(OK_EXIT_CODE);
+    else:
+        return raiseException( ERR_EXIT_CODE, "Unable to query the remote server. Please try again in a while.", output_dir_path, str(options.errorfile) );
+
+def query( options, args ):
+    output_dir_path = options.outputdir;
+    multiple_data = {};
+    comma_sep_file_paths = options.files;
+    #print("files: "+str(comma_sep_file_paths)+" - "+str(type(comma_sep_file_paths)));
+    # check if options.files contains at least one file path
+    if comma_sep_file_paths is not None:
+        # split file paths
+        file_paths = comma_sep_file_paths.split(",");
+        # split file names
+        comma_sep_file_names = str(options.names);
+        #print("names: "+str(comma_sep_file_names));
+        file_names = comma_sep_file_names.split(",");
+        for idx, file_path in enumerate(file_paths):
+            #file_name = file_names[idx];
+            with open(file_path, 'r') as content_file:
+                for line in content_file:
+                    if line.strip() != "":
+                        line_split = line.strip().split("\t"); # split on tab
+                        if len(line_split) == 2: # 0:id , 1:seq , otherwise skip line
+                            seq_id = line_split[0];
+                            # fix seq_id using valid chars only
+                            seq_id = ''.join(e for e in seq_id if e in VALID_CHARS)
+                            seq_text = line_split[1];
+                            if seq_id in multiple_data:
+                                return raiseException( ERR_EXIT_CODE, "Error: the id '"+seq_id+"' is duplicated", output_dir_path, str(options.errorfile) );
+                            multiple_data[seq_id] = seq_text;
+        if len(multiple_data) > 0:
+            return query_request( options, args, multiple_data );
+            #return echo( options, args );
+        else:
+            return raiseException( ERR_EXIT_CODE, "An error has occurred. Please be sure that your input files are valid.", output_dir_path, str(options.errorfile) );
+    else:
+        # try with the sequence in --sequence
+        text_content = options.sequences;
+        #print("sequences: "+text_content);
+        # check if options.sequences contains a list of sequences (one for each row)
+        if text_content is not None:
+            text_content = str(text_content);
+            if text_content.strip():
+                # populate a dictionary with the files containing the sequences to query
+                text_content = text_content.strip().split("__cn__"); # split on new line
+                for line in text_content:
+                    if line.strip() != "":
+                        line_split = line.strip().split("__tc__"); # split on tab
+                        if len(line_split) == 2: # 0:id , 1:seq , otherwise skip line
+                            seq_id = line_split[0];
+                            # fix seq_id using valid chars only
+                            seq_id = ''.join(e for e in seq_id if e in VALID_CHARS)
+                            seq_text = line_split[1];
+                            if seq_id in multiple_data:
+                                return raiseException( ERR_EXIT_CODE, "Error: the id '"+seq_id+"' is duplicated", output_dir_path, str(options.errorfile) );
+                            multiple_data[seq_id] = seq_text;
+                if len(multiple_data) > 0:
+                    return query_request( options, args, multiple_data );
+                    #return echo( options, args );
+                else:
+                    return raiseException( ERR_EXIT_CODE, "An error has occurred. Please be sure that your input files are valid.", output_dir_path, str(options.errorfile) );
+            else:
+                return raiseException( ERR_EXIT_CODE, "You have to insert at least one row formatted as a tab delimited (ID, SEQUENCE) couple", output_dir_path, str(options.errorfile) );
+    return ERR_EXIT_CODE;
+
+def __main__():
+    # Parse the command line options
+    usage = "Usage: query.py --files comma_sep_file_paths --names comma_seq_file_names --sequences sequences_text --search search_mode --exact exact_alg --sthreshold threshold --outputdir output_dir_path";
+    parser = optparse.OptionParser(usage = usage);
+    parser.add_option("-v", "--version", action="store_true", dest="version",
+                      default=False, help="display version and exit")
+    parser.add_option("-f", "--files", type="string",
+                      action="store", dest="files", help="comma separated files path");
+    parser.add_option("-n", "--names", type="string",
+                      action="store", dest="names", help="comma separated names associated to the files specified in --files");
+    parser.add_option("-s", "--sequences", type="string",
+                      action="store", dest="sequences", help="contains a list of sequences (one for each row)");
+    parser.add_option("-a", "--fasta", type="string",
+                      action="store", dest="fasta", help="contains the content of a fasta file");
+    parser.add_option("-x", "--search", type="string", default=0,
+                      action="store", dest="search", help="search mode");
+    parser.add_option("-e", "--exact", type="int", default=0,
+                      action="store", dest="exact", help="exact algorithm (required if search is 1 only)");
+    parser.add_option("-t", "--sthreshold", type="float",
+                      action="store", dest="sthreshold", help="threshold applied to the search algorithm");
+    parser.add_option("-o", "--outputdir", type="string", default="output",
+                      action="store", dest="outputdir", help="output directory (collection) path");
+    parser.add_option("-r", "--errorfile", type="string", default="error.log",
+                      action="store", dest="errorfile", help="error file name containing error messages");
+
+    # TEST
+    #sequences = 'NM_001169378.2__tc__atttcggatgctttggagggaggaactctagtgctgcattgattggggcgtgtgttaatgatattcccagttcgcatggcgagcatcgattcctggtacgtatgtgggccccttgactcccacttatcgcacttgtcgttcgcaatttgcatgaattccgcttcgtctgaaacgcacttgcgccagacttctccggctggtctgatctggtctgtgatccggtctggtggggcgccagttgcgtttcgagctcatcaccagtcactccgcagtcgcattctgccagaggtctccgatcaagagcgcttctccattcgagattcaaacgcagcgcggtctgacgccgccacatcgagtgaaatccatatcgatggccacattcacacaggacgagatcgacttcctgcgcagccatggcaacgagctgtgtgccaagacctggctgggattgtgggatccgaagcgggctgtgcaccagcaggagcagcgcgaactgatgatggacaagtatgagcggaagcgatactacctggagccggccagtcctcttaagtcgctggccaatgcggtcaacctgaagtcgtctgctccggcgacgaaccacactcagaatggccaccaaaatgggtatgccagcatccatttgacgcctcctgctgcccagcggacctcggccaatggattgcagaaggtggccaactcgtcgagtaactcttctggaaagacctcatcctcgatcagtaggccacactataatcaccagaacaacagccaaaacaacaatcacgatgcctttggcctgggtggcggattgagcagcctgaacagcgccggttccacatccactggagctctttccgacaccagcagttgtgctagcaatggcttcggtgcggactgcgactttgtggctgactttggctcggccaacattttcgacgccacatcggcgcgttccacaggatcgccggcggtgtcgtccgtgtcctcagtgggttccagcaatggctacgccaaggtgcagcccatccgggcagctcatctccagcagcaacagcagttgcagcagcagctgcatcagcagcagctcctcaatggcaatggtcatcagggcactgagaactttgccgacttcgatcacgctcccatctacaatgcagtggctccaccgacttttaacgattggatcagcgactggagcaggcggggcttccacgatcccttcgacgattgcgatgactcgccaccaggtgcccgccctccagcacctgcgccagctcctgctcaagttcccgcagtatcatcaccattgccaaccgtccgagaagaaccagagcttgcgtggaatttttgggaggacgagatgcgaatagaggcgcaggaaaaggagtcccaaactaaacagccggagttgggctactccttttcgattagtactactacgcccctttccccttcgaatcccttcctgccctaccttgtcagtgaggagcagcatcgaaatcatccagagaagccctccttttcgtattcgttgttcagctccatatcaaatagttcgcaagaagatcaggcggatgatcatgagatgaatgttttaaatgccaatttccatgatttctttacgtggagtgctcccttgcagaacggccatacgaccagtccgcccaagggcggaaatgcagcgatggcgcccagtgaggatcgatatgccgctcttaaggatctcgacgagcagctgcgagaactgaaggccagcgaaagcgccacagagacgcccacgcccaccagtggcaatgttcaggccacagatgcctttggtggagccctcaacaacaatccaaatcccttcaagggccagcaacagcagcagctcagcagccatgtggtgaatccattccagcagcagcaacagcagcagcaccagcagaatctctatggccagttgacgctcataccaaatgcctacggcagcagttcccagcagcagatggggcaccatctcctccagcagcagcagcagcaacagcagagcttcttcaacttcaacaacaacgggttcgccatctcgcagggtctgcccaacggctgcggcttcggcagcatgcaacccgctcctgtgatggccaacaatccctttgcagccagcggcgccatgaacaccaacaatccattcttatgagactcaacccgggagaatccgcctcgcgccacctggcagaggcgctgagccagcgaacaaagagcagacgcggaggaaccgaaccgaaattagtccattttactaacaatagcgttaatctatgtatacataatgcacgccggagagcactctttgtgtacatagcccaaatatgtacacccgaaaggctccacgctgacgctagtcctcgcggatggcggaggcggactggggcgttgatatattcttttacatggtaactctactctaacgtttacggatacggatatttgtatttgccgtttgccctagaactctatacttgtactaagcgcccatgaacacttcatccactaacatagctactaatcctcatcctagtggaggatgcagttggtccagacactctgttatttgttttatccatcctcgtacttgtctttgtcccatttagcactttcgttgcggataagaactttgtcagttattgattgtgtggccttaataagattataaaactaaatattataacgtacgactatacatatacggatacagatacagattcagacacagttagtacagatacagatatacatatacgcttttgtacctaatgaattgcttcttgtttccattgctaatcatctgcttttcgtgtgctaattttatacactagtacgtgcgatatcggccgtgcagatagattgctcagctcgcgagtcaagcctcttttggttgcacccacggcagacatttgtacatatactgtctgattgtaagcctcgtgtaatacctccattaacaccactcccccaccacccatccatcgaaccccgaatccatgactcaattcactgctcacatgtccatgcccatgccttaacgtgtcaaacattatcgaagccttaaagttatttaaaactacgaaatttcaataaaaacaaataagaacgctatc';
+    #(options, args) = parser.parse_args(['-x', 'rrr', '-t', 0.5, '-s', sequences, '-o', 'collection_content']);
+
+    (options, args) = parser.parse_args();
+    if options.version:
+        print __version__;
+    else:
+        # create output dir (collection)
+        output_dir_path = options.outputdir;
+        if not os.path.exists(output_dir_path):
+            os.makedirs(output_dir_path);
+
+        return query( options, args );
+
+if __name__ == "__main__": __main__()
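query.py drives the remote AllSome Sequence Bloom Tree service in two steps: it POSTs the dictionary of (ID, SEQUENCE) couples plus the search parameters to tree/1/query, then polls status/<query_id> every QUERY_DELAY seconds until the state becomes SUCCESS (FAILURE or REVOKED aborts), and finally writes one <sequence_id>_tabular file per result block into the output collection directory. The sketch below condenses that submit-and-poll protocol; the endpoints and the query_id/state/results fields are taken from the script above, while the helper name submit_and_poll and the omitted error-file and collection handling are simplifications for illustration only.

# Condensed sketch of the submit-and-poll protocol implemented by query.py.
# Endpoints and response fields (query_id, state, results) mirror the script
# above; everything else is simplified for illustration.
import json
import time

import requests

SERVICE_URL = "http://nn14.galaxyproject.org:8080/"

def submit_and_poll(sequences, search_mode="rrr", threshold=0.7, delay=30):
    # payload = {seq_id: sequence, ...} plus the search parameters,
    # the same shape that query_request() builds
    payload = dict(sequences)
    payload.update({"search_mode": search_mode,
                    "exact_algorithm": 0,
                    "search_threshold": threshold})
    resp = requests.post(SERVICE_URL + "tree/1/query", json=payload)
    resp.raise_for_status()
    query_id = json.loads(resp.content)["query_id"]
    while True:
        status = json.loads(requests.get(SERVICE_URL + "status/" + query_id).content)
        if status["state"] == "SUCCESS":
            return status["results"]  # one block per sequence_id, each with its 'hits'
        if status["state"] in ("FAILURE", "REVOKED"):
            raise RuntimeError("query %s ended in state %s" % (query_id, status["state"]))
        time.sleep(delay)             # QUERY_DELAY in query.py

# e.g. results = submit_and_poll({"id0": "CCAACCAAAGGGAAAACTTTTTTCCGACTTTGGCC"})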
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/query.xml	Tue Apr 03 20:27:39 2018 -0400
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<tool name="BloomTree Manager - Query" id="btman_query" version="1.0.0">
+    <description>the Sequence Bloom Tree</description>
+    <requirements>
+        <requirement type="package" version="2.7.10">python</requirement>
+        <requirement type="package" version="2.18.4">requests</requirement>
+    </requirements>
+    <command detect_errors="exit_code">
+<![CDATA[
+        python '$__tool_directory__/query.py'
+
+        --search 'rrr'
+        --sthreshold ${sthreshold}
+        --exact 0
+
+        #if $conditional_input.inputtype == '0':
+            #set file_paths = ','.join( [ str( $f ) for $f in $conditional_input.txtfiles ] )
+            #if $file_paths is not 'None':
+                --files '${file_paths}'
+                #set file_names = ','.join( [ str( $f.name ) for $f in $conditional_input.txtfiles ] )
+                --names '${file_names}'
+            #end if
+        #elif $conditional_input.inputtype == '1':
+            --sequences '${conditional_input.sequences}'
+        #end if
+
+        --outputdir 'collection_content'
+        --errorfile 'Error Log File'
+]]>
+    </command>
+    <inputs>
+        <conditional name="conditional_input">
+            <param name="inputtype" type="select" label="Input mode" help="Select a mode based on how you want to specify the input">
+                <option value="0" selected="true">By file</option>
+                <option value="1">By manually inserted text</option>
+            </param>
+            <when value="0">
+                <param format="tabular" name="txtfiles" type="data" label="Select files" multiple="true" optional="false" help="Select one or more tabular files containing one (ID, TRANSCRIPT) couple per line. The content of these files will be merged into a single query to the AllSome Sequence Bloom Tree Search Engine, which will return a collection containing one file per ID. Each returned file will contain a list of accession numbers." />
+            </when>
+            <when value="1">
+                <param name="sequences" type="text" area="True" size="5x25" label="Manually insert sequences" optional="false" help="Insert a list of (ID, TRANSCRIPT) couples in a tab delimited format, one per line. The content of this text box will represent a query to the AllSome Sequence Bloom Tree Search Engine, which will return a collection containing one file per ID. Each returned file will contain a list of accession numbers." />
+            </when>
+        </conditional>
+        <param name="sthreshold" size="3" type="float" value="0.7" min="0.0" max="1.0" label="Search threshold" help="This threshold controls the specificity. Lower values will produce more hits to the query. Higher values are more stringent and will produce fewer hits." />
+    </inputs>
+    <outputs>
+        <collection name="output_collect" type="list" label="AllSome Sequence Bloom Tree Search Collection">
+            <discover_datasets pattern="(?P<identifier_0>[^_]+)_(?P<ext>[^_]+)" directory="collection_content" ext="auto" />
+        </collection>
+    </outputs>
+
+    <help><![CDATA[
+This Query tool is part of the BloomTree Manager Framework, which allows one to rapidly identify all publicly available
+sequenced samples which express a transcript of interest.
+
+----
+
+The input for this tool is a list of (ID, TRANSCRIPT) couples, one per line,
+in a tab delimited format::
+
+    id0    CCAACCAAAGGGAAAACTTTTTTCCGACTTTGGCCTAAAGGGTTTAACGGCCAAGTCAGAAGGGAAAAAGTTGCGCCA
+    id1    TTAATGACAGGGCCACATGATGTGAAAAAAAATCAGAAACCGAGTCAACGTGAGAAGATAGTACGTACTACCGCAAAT
+    ...
+    idn    CAATTAATGATAAATATTTTATAAGGTGCGGAAATAAAGTGAGGAATATCTTTTAAATTCAAGTTCAATTCTGAAAGC
+
+The ID can contain alphanumeric characters in addition to spaces, dots, dashes, and round and square brackets.
+Any additional character will be trimmed out.
+
+The output of the tool is a collection that contains one file for each ID with a list of
+accession numbers representing the samples that express that particular transcript.
+
+----
+
+.. class:: infomark
+
+**Notes**
+
+This Galaxy tool has been developed by Fabio Cumbo.
+
+Please visit this GitHub_repository_ for more information about the BloomTree Manager.
+
+.. _GitHub_repository: https://github.com/fabio-cumbo/bloomtree-manager
+    ]]></help>
+
+    <citations>
+        <citation type="doi">10.1101/090464</citation>
+    </citations>
+</tool>
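In query.xml, the discover_datasets directive is what turns the files written by query.py into the elements of the output collection: the identifier_0 group names each element and the ext group determines its datatype. The filenames are built by query.py as <sequence_id>_tabular, and because VALID_CHARS strips underscores from IDs, the [^_]+ groups split each filename unambiguously. Below is a quick, purely illustrative check of that pattern; the sample IDs come from the help text above plus a sanitized accession-style ID.

# Illustrative check that the discover_datasets pattern in query.xml matches
# the "<sequence_id>_tabular" filenames produced by query.py.
import re

pattern = re.compile(r"(?P<identifier_0>[^_]+)_(?P<ext>[^_]+)")

# "NM_001169378.2" becomes "NM001169378.2" after VALID_CHARS filtering,
# so IDs never contain underscores and the split below is unambiguous.
for filename in ("id0_tabular", "id1_tabular", "NM001169378.2_tabular"):
    match = pattern.match(filename)
    print(match.group("identifier_0"), match.group("ext"))
# id0 tabular
# id1 tabular
# NM001169378.2 tabular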