# HG changeset patch
# User fabio
# Date 1516657310 18000
# Node ID 00d6e82d74e9e4d6963d439933d8740d64c920a5
Uploaded 20180122
diff -r 000000000000 -r 00d6e82d74e9 ._.shed.yml
Binary file ._.shed.yml has changed
diff -r 000000000000 -r 00d6e82d74e9 ._retrieve.py
Binary file ._retrieve.py has changed
diff -r 000000000000 -r 00d6e82d74e9 ._retrieve.xml
Binary file ._retrieve.xml has changed
diff -r 000000000000 -r 00d6e82d74e9 ._search.py
Binary file ._search.py has changed
diff -r 000000000000 -r 00d6e82d74e9 ._search.xml
Binary file ._search.xml has changed
diff -r 000000000000 -r 00d6e82d74e9 .shed.yml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/.shed.yml Mon Jan 22 16:41:50 2018 -0500
@@ -0,0 +1,20 @@
+name: sbtas_se
+owner: iuc
+categories:
+ - Web Services
+ - Data Source
+description: AllSome Sequence Bloom Tree Search Engine
+long_description: |
+ A fast querying tool to search on the Sequence Read Archive repository
+ using Bloom Filters.
+remote_repository_url: https://github.com/fabio-cumbo/bloomtree-allsome-search-engine
+homepage_url: https://github.com/fabio-cumbo/bloomtree-allsome-search-engine
+type: unrestricted
+auto_tool_repositories:
+ name_template: "{{ tool_id }}"
+ descriptor_template: "Wrapper for AllSome Sequence Bloom Tree Search Engine application: {{ tool_name }}."
+suite:
+ name: "sbtas_se_suite"
+ description: "A suite of Galaxy tools designed to interface with the AllSome Sequence Bloom Tree Search Engine APIs."
+ long_description: |
+ Rapid querying of massive sequence datasets
\ No newline at end of file
diff -r 000000000000 -r 00d6e82d74e9 retrieve.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/retrieve.py Mon Jan 22 16:41:50 2018 -0500
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+
+# NCBI SRA Tools
+# https://galaxyproject.org/tutorials/upload/
+
+import os
+import optparse
+from subprocess import Popen, PIPE
+
+db_key = "?";
+sra_instant_url = "ftp://ftp-trace.ncbi.nlm.nih.gov/sra/sra-instant/reads/ByRun/sra/SRR/";
+
def convertSRA(tmp_dir, accession_number, data_format):
    """Convert a previously downloaded .sra file to the requested format.

    Runs the SRA Toolkit's fastq-dump on <tmp_dir>/<accession_number>.sra,
    producing <accession_number><data_format> in the same directory.

    Parameters:
        tmp_dir          -- directory holding the .sra file (output goes here too)
        accession_number -- SRA run accession, e.g. "SRR000001"
        data_format      -- one of ".fasta.gz", ".fastq.gz", ".fasta", ".fastq"

    Returns the path of the converted file, or "" on any failure
    (missing directory/file, unknown format, or fastq-dump error).
    """
    absolute_tmp_dir = os.path.abspath(tmp_dir)
    sra_file_path = os.path.join(absolute_tmp_dir, accession_number + ".sra")
    if os.path.isdir(absolute_tmp_dir) and os.path.exists(sra_file_path):
        # map the requested output format to the fastq-dump command line
        cmd = None
        if data_format == ".fasta.gz":
            cmd = ["fastq-dump", "--fasta", "--gzip", sra_file_path, "--outdir", absolute_tmp_dir]
        elif data_format == ".fastq.gz":
            cmd = ["fastq-dump", "--gzip", sra_file_path, "--outdir", absolute_tmp_dir]
        elif data_format == ".fasta":
            cmd = ["fastq-dump", "--fasta", sra_file_path, "--outdir", absolute_tmp_dir]
        elif data_format == ".fastq":
            cmd = ["fastq-dump", sra_file_path, "--outdir", absolute_tmp_dir]
        if cmd is not None:
            process = Popen(cmd, stdout=PIPE, stderr=PIPE)
            (output, err) = process.communicate()
            # fastq-dump reports progress on stderr even on success, and the
            # original code never piped stderr (so `err` was always None);
            # the exit code is the reliable failure signal.
            if process.returncode != 0:
                # remove any partial output file
                an_file_path = os.path.join(tmp_dir, accession_number + data_format)
                if os.path.exists(an_file_path):
                    os.unlink(an_file_path)
                # NOTE: the original retried by calling downloadAccessionData()
                # with the wrong arguments, which raised a TypeError; the retry
                # loop already lives in downloadAccessionData(), so just fail.
                return ""
            return os.path.join(tmp_dir, accession_number + data_format)
    return ""
+
def downloadAccessionData(accession_number, accession_path, appdata_path, data_format, limit=10):
    """Download one SRA run and convert it to the requested format.

    Fetches <accession_number>.sra from the NCBI sra-instant FTP mirror into
    appdata_path via wget, converts it with convertSRA(), and renames the
    converted file to accession_path.  The download is retried up to `limit`
    times before giving up.

    Returns 0 on success, -1 on failure.
    """
    # sra-instant layout: .../SRR/<first 6 chars>/<accession>/<accession>.sra
    split = accession_number[:6]
    srr_path = sra_instant_url + split + "/" + accession_number + "/" + accession_number + ".sra"
    sra_file_path = os.path.join(appdata_path, accession_number + ".sra")
    process = Popen(['wget', srr_path, "--output-document=" + sra_file_path], stdout=PIPE)
    (output, err) = process.communicate()
    # wget writes its progress to stderr, which was never piped, so the
    # original `if err:` check could not fire; use the exit code instead.
    if process.returncode != 0:
        # remove any partial download (the original referenced an undefined
        # `an_file_path` here, raising a NameError on every failed download)
        if os.path.exists(sra_file_path):
            os.unlink(sra_file_path)
        # retry until the attempt budget is exhausted
        if limit > 0:
            return downloadAccessionData(accession_number, accession_path, appdata_path, data_format, limit - 1)
        return -1
    if os.path.exists(sra_file_path):
        converted_file_path = convertSRA(appdata_path, accession_number, data_format)
        if os.path.exists(converted_file_path):
            os.rename(converted_file_path, accession_path)
        os.unlink(sra_file_path)
        return 0
    # wget exited 0 but produced no file: treat as failure
    return -1
+
def process_accessions(options, args):
    """Read accession lists from the input files and download each run.

    Creates options.appdata if needed, then for every path in the
    comma-separated options.files (paired positionally with options.names)
    reads one accession per line — skipping blanks and ">"-prefixed headers —
    and hands each accession to downloadAccessionData().  Output files are
    named "<collection>_<accession>_<format>_<dbkey>" so Galaxy can group
    them into a collection.  Returns 0.
    """
    # make sure the working directory exists
    appdata_path = options.appdata
    if not os.path.exists(appdata_path):
        os.makedirs(appdata_path)
    data_format = options.dataformat
    # (a small "Hello World" collection test used to live here; removed)
    comma_sep_file_paths = options.files
    # nothing to do unless at least one accession-list file was supplied
    if comma_sep_file_paths is not None:
        file_paths = comma_sep_file_paths.split(",")
        file_names = str(options.names).split(",")
        for idx, file_path in enumerate(file_paths):
            file_name = file_names[idx]
            with open(file_path) as accessions:
                for line in accessions:
                    accession_number = line.strip()
                    # skip blank lines and FASTA-style header lines
                    if accession_number == "" or line.startswith(">"):
                        continue
                    prefixed_name = "_".join([file_name, accession_number, data_format[1:], db_key])
                    accession_path = os.path.join(appdata_path, prefixed_name)
                    # download the data for this accession number
                    downloadAccessionData(accession_number, accession_path, appdata_path, data_format)
    return 0
+
def __main__():
    """Parse the command line and run the accession download.

    With -v/--version, print the module version; otherwise delegate to
    process_accessions() and propagate its return value.
    """
    usage = "Usage: retrieve.py --files comma_sep_file_paths --names comma_sep_file_names --format data_format --appdata folder_name"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-f", "--files", type="string",
                      action="store", dest="files", help="comma separated files path")
    parser.add_option("-n", "--names", type="string",
                      action="store", dest="names", help="comma separated names associated to the files specified in --files")
    parser.add_option("-e", "--format", type="string",
                      action="store", dest="dataformat", help="data format")
    parser.add_option("-a", "--appdata", type="string",
                      action="store", dest="appdata", help="appdata folder name")
    parser.add_option("-v", "--version", action="store_true", dest="version",
                      default=False, help="display version and exit")
    (options, args) = parser.parse_args()
    if options.version:
        # The original "print __version__" used the Python 2 print statement
        # and referenced __version__, which is never defined in this module,
        # so it raised a NameError.  print(...) with a single argument is
        # valid on both Python 2 and 3.
        print(globals().get("__version__", "unknown"))
    else:
        return process_accessions(options, args)

if __name__ == "__main__":
    __main__()
diff -r 000000000000 -r 00d6e82d74e9 retrieve.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/retrieve.xml Mon Jan 22 16:41:50 2018 -0500
@@ -0,0 +1,41 @@
+
+
+ data from SRA
+
+ python
+ sra-tools
+
+
+ ${stdouterr}
+]]>
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff -r 000000000000 -r 00d6e82d74e9 search.py
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/search.py Mon Jan 22 16:41:50 2018 -0500
@@ -0,0 +1,155 @@
+#!/usr/bin/env python
+
+# https://github.com/ross/requests-futures
+# http://docs.python-requests.org/en/master/user/quickstart/#more-complicated-post-requests
+
+import os, uuid
+import optparse
+import requests
+from requests_futures.sessions import FuturesSession
+
+#### UV0 ####
+# proxy to uv0
+#service_url = "http://deputy.bx.psu.edu/";
+# url to query page
+#query_url = service_url+"query.php";
+# url to echo page: just return 'it works!'
+#echo_url = service_url+"echo.php";
+#############
+
+#### NN14 ####
+service_url = "http://nn14.galaxyproject.org:8080/";
+query_url = service_url+"tree/0/query";
+##############
+
+'''
+# synchronous
+def echo( options, args ):
+ # create a session
+ session = requests.Session()
+ # make a sync get request
+ resp = session.get(echo_url)
+ # check for response status code
+ resp_code = resp.status_code;
+ if resp_code == requests.codes.ok:
+ # get output file path
+ output_file_path = options.output;
+ # write response on the output file
+ with open(output_file_path, 'w') as out:
+ #out.write(resp.data);
+ out.write(resp.content);
+ return 0;
+ else:
+ return resp_code;
+'''
+
+# asynchronous
def async_request(options, args, payload):
    """POST the query payload to the search engine and save the response.

    Extends `payload` in place with the tree/search parameters taken from
    `options`, posts it asynchronously to the service query endpoint, then
    blocks until the response arrives and writes "<status_code>\n<body>" to
    options.output.  Returns 0 when the service answers 200 OK, otherwise
    the HTTP status code.
    """
    # attach the search parameters expected by the service
    payload.update({
        "tree_id": str(options.treeid),
        "search_mode": str(options.search),
        "exact_algorithm": str(options.exact),
        "search_threshold": str(options.sthreshold),
    })
    # fire the POST without blocking, then wait for its result
    response = FuturesSession().post(query_url, data=payload).result()
    status = response.status_code
    # persist status code and raw body for the Galaxy history item
    with open(options.output, 'w') as out:
        out.write(str(status) + "\n" + str(response.content))
    return 0 if status == requests.codes.ok else status
+
def srase_query(options, args):
    """Build the query payload and submit it to the search engine.

    Three input sources are supported:
      * --files / --names : one payload entry per file, keyed by the
        positionally matching name;
      * --sequences (search mode "0"): entries "sequence0", "sequence1", ...
        obtained by splitting on Galaxy's newline escape "__cn__";
      * --fasta (search mode "1"): a single "fasta" entry holding the text.
    Returns async_request()'s result, or -1 when no query content was given.
    """
    payload = {}
    if options.files is not None:
        # one payload entry per input file
        paths = options.files.split(",")
        names = str(options.names).split(",")
        for idx, path in enumerate(paths):
            with open(path, 'r') as handle:
                payload[names[idx]] = handle.read()
        if len(payload) > 0:
            return async_request(options, args, payload)
    else:
        mode = str(options.search)
        # pick the raw query text for the selected search mode
        raw = ""
        if mode == "0":
            raw = options.sequences
        elif mode == "1":
            raw = options.fasta
        if raw is not None:
            raw = str(raw)
            if raw.strip():
                if mode == "0":
                    # one entry per sequence; "__cn__" encodes a newline
                    for counter, seq in enumerate(raw.split("__cn__")):
                        payload["sequence" + str(counter)] = seq
                elif mode == "1":
                    payload["fasta"] = raw
                return async_request(options, args, payload)
            else:
                return -1
    return -1
+
def __main__():
    """Parse the command line and run the SRA search engine query.

    With -v/--version, print the module version; otherwise delegate to
    srase_query() and propagate its return value (retrieve.py already
    propagates its worker's result; this makes the two scripts consistent).
    """
    usage = "Usage: search.py --files comma_sep_file_paths --names comma_sep_file_names --sequences sequences_text --search search_mode --exact exact_alg --sthreshold threshold --output output_file_path"
    parser = optparse.OptionParser(usage=usage)
    parser.add_option("-i", "--treeid", type="string",
                      action="store", dest="treeid", help="string representing the tree id")
    parser.add_option("-f", "--files", type="string",
                      action="store", dest="files", help="comma separated files path")
    parser.add_option("-n", "--names", type="string",
                      action="store", dest="names", help="comma separated names associated to the files specified in --files")
    parser.add_option("-s", "--sequences", type="string",
                      action="store", dest="sequences", help="contains a list of sequences (one for each row)")
    parser.add_option("-a", "--fasta", type="string",
                      action="store", dest="fasta", help="contains the content of a fasta file")
    parser.add_option("-x", "--search", type="int", default=0,
                      action="store", dest="search", help="search mode")
    parser.add_option("-e", "--exact", type="int", default=0,
                      action="store", dest="exact", help="exact algorithm (required if search is 1 only)")
    parser.add_option("-t", "--sthreshold", type="string",
                      action="store", dest="sthreshold", help="threshold applied to the search algorithm")
    parser.add_option("-o", "--output", type="string",
                      action="store", dest="output", help="output file path")
    parser.add_option("-v", "--version", action="store_true", dest="version",
                      default=False, help="display version and exit")
    (options, args) = parser.parse_args()
    if options.version:
        # The original "print __version__" used the Python 2 print statement
        # and referenced __version__, which is never defined in this module,
        # so it raised a NameError.  print(...) with a single argument is
        # valid on both Python 2 and 3.
        print(globals().get("__version__", "unknown"))
    else:
        return srase_query(options, args)

if __name__ == "__main__":
    __main__()
diff -r 000000000000 -r 00d6e82d74e9 search.xml
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/search.xml Mon Jan 22 16:41:50 2018 -0500
@@ -0,0 +1,58 @@
+
+
+ your sequences in the big SRA data lake
+
+ python
+ requests
+ requests-futures
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ 10.1101/090464
+
+