query.py @ 4:35593423c2e2 draft

Uploaded 20180131
author fabio
date Wed, 31 Jan 2018 11:28:53 -0500
parents
children 97dd57f81d77
#!/usr/bin/env python

# https://github.com/ross/requests-futures
# http://docs.python-requests.org/en/master/user/quickstart/#more-complicated-post-requests

import os, uuid, optparse, requests, json, time
#from requests_futures.sessions import FuturesSession

#### NN14 ####
service_url = "http://nn14.galaxyproject.org:8080/";
#service_url = "http://127.0.0.1:8082/";
query_url = service_url+"tree/0/query";
status_url = service_url+"status/<task_id>";
##############

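# Sketch of the payload that query_request() ends up posting to the query
# route: the <id, sequence> pairs collected by query() plus the three search
# options appended below. The values are taken from the TEST block at the
# bottom of this file; the exact schema expected by the NN14 service is
# otherwise an assumption.
#
# payload = {
#     "id0": "CAATTAATGATAAATATTTTATAAGGTGCGGAAATAAAGTGAGGAATATCTTTTAAATTCAAGTTCAATTCTGAAAGC",
#     "search_mode": "rrr",
#     "exact_algorithm": 0,
#     "search_threshold": 0.5
# }
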
def query_request( options, args, payload ):
    # add the search parameters to the payload
    #payload["tree_id"] = str(options.treeid);
    payload["search_mode"] = str(options.search);
    payload["exact_algorithm"] = int(options.exact);
    payload["search_threshold"] = float(options.sthreshold);
    # set the content type to application/json
    headers = {'Content-type': 'application/json'};

    # create a session
    session = requests.Session();
    # make a synchronous post request to the query route
    req = session.post(query_url, headers=headers, json=payload);
    resp_code = req.status_code;
    #print(req.text+"\n\n");
    if resp_code == requests.codes.ok:
        # req.text is the decoded response body
        resp_content = req.text;
        # parse the response as JSON
        json_content = json.loads(resp_content);
        # retrieve the task id of the asynchronous job
        task_id = json_content['task_id'];
        task_processed = False;
        # results json content
        json_status_content = None;
        task_status = None;
        # poll the status route until the task succeeds or fails
        while task_processed is False:
            # create a new session
            session = requests.Session();
            # make a synchronous get request to the status route
            status_query_url = status_url.replace("<task_id>", task_id);
            status_req = session.get(status_query_url);
            status_resp_content = status_req.text;
            #print(status_resp_content+"\n\n");
            # parse the status response as JSON
            json_status_content = json.loads(status_resp_content);
            # take a look at the state
            # the 'state' attribute is always available
            if json_status_content['state'] == 'SUCCESS':
                task_processed = True;
                break;
            elif json_status_content['state'] in ['FAILURE', 'REVOKED']:
                return "Task status: "+str(json_status_content['state']);
            else:
                time.sleep(60); # polling interval, in seconds

        # get the output dir (collection) path and create it if needed
        output_dir_path = options.outputdir;
        if not os.path.exists(output_dir_path):
            os.makedirs(output_dir_path);
        out_file_format = "txt";

        # write one file per queried sequence into the output collection
        for block in json_status_content['results']:
            seq_id = block['sequence_id'];
            accessions = block['accession_numbers'];
            # put the response block in the output collection
            output_file_path = os.path.join(output_dir_path, seq_id + "_" + out_file_format);
            accessions_list = "";
            for accession_number in accessions:
                accessions_list = accessions_list + accession_number + "\n";
            with open(output_file_path, 'w') as out:
                out.write(accessions_list.strip());
    else:
        return "Unable to query the remote server. Please try again later.";

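# Sketch of the status-route response that the polling loop above consumes.
# Only the keys actually read by query_request() ('state', 'results',
# 'sequence_id', 'accession_numbers') are grounded in this script; the
# accession values below are placeholders and any other field returned by the
# service is an assumption.
#
# {
#     "state": "SUCCESS",
#     "results": [
#         {
#             "sequence_id": "id0",
#             "accession_numbers": ["ACCESSION_0", "ACCESSION_1"]
#         }
#     ]
# }
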
def query( options, args ):
    multiple_data = {};
    comma_sep_file_paths = options.files;
    #print("files: "+str(comma_sep_file_paths)+" - "+str(type(comma_sep_file_paths)));
    # check if options.files contains at least one file path
    if comma_sep_file_paths is not None:
        # split file paths
        file_paths = comma_sep_file_paths.split(",");
        # split file names
        comma_sep_file_names = str(options.names);
        #print("names: "+str(comma_sep_file_names));
        file_names = comma_sep_file_names.split(",");
        for idx, file_path in enumerate(file_paths):
            #file_name = file_names[idx];
            with open(file_path, 'r') as content_file:
                for line in content_file:
                    if line.strip() != "":
                        line_split = line.strip().split("__tc__"); # split on tab (Galaxy-escaped)
                        if len(line_split) == 2: # 0:id , 1:seq , otherwise skip line
                            seq_id = line_split[0];
                            seq_text = line_split[1];
                            if seq_id in multiple_data:
                                return "Error: the id '"+seq_id+"' is duplicated";
                            multiple_data[seq_id] = seq_text;
        if len(multiple_data) > 0:
            return query_request( options, args, multiple_data );
            #return echo( options, args );
        else:
            return "An error has occurred. Please be sure that your input files are valid.";
    else:
        # try with the sequences in --sequences
        text_content = options.sequences;
        #print("sequences: "+text_content);
        # check if options.sequences contains a list of sequences (one for each row)
        if text_content is not None:
            text_content = str(text_content);
            if text_content.strip():
                # populate a dictionary with the sequences to query
                text_content = text_content.strip().split("__cn__"); # split on new line (Galaxy-escaped)
                for line in text_content:
                    if line.strip() != "":
                        line_split = line.strip().split("__tc__"); # split on tab (Galaxy-escaped)
                        if len(line_split) == 2: # 0:id , 1:seq , otherwise skip line
                            seq_id = line_split[0];
                            seq_text = line_split[1];
                            if seq_id in multiple_data:
                                return "Error: the id '"+seq_id+"' is duplicated";
                            multiple_data[seq_id] = seq_text;
                if len(multiple_data) > 0:
                    return query_request( options, args, multiple_data );
                    #return echo( options, args );
                else:
                    return "An error has occurred. Please be sure that your input sequences are valid.";
            else:
                return "You have to insert at least one row formatted as a tab delimited <id, sequence> tuple";
    return -1;

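# Illustration of the Galaxy-escaped input handled by query(): Galaxy encodes
# newlines as "__cn__" and tabs as "__tc__" in text parameters, so a multi-row
# <id, sequence> input arrives as a single string. The first pair below comes
# from the TEST block in __main__; the second (id1/ACGT) is illustrative only.
#
# --sequences 'id0__tc__CAATTAATGATAAATATTTTATAAGGTGCGGAAATAAAGTGAGGAATATCTTTTAAATTCAAGTTCAATTCTGAAAGC__cn__id1__tc__ACGT'
#
# query() turns that string into:
# multiple_data = {
#     "id0": "CAATTAATGATAAATATTTTATAAGGTGCGGAAATAAAGTGAGGAATATCTTTTAAATTCAAGTTCAATTCTGAAAGC",
#     "id1": "ACGT"
# }
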
def __main__():
    # Parse the command line options
    usage = "Usage: query.py --files comma_sep_file_paths --names comma_sep_file_names --sequences sequences_text --search search_mode --exact exact_alg --sthreshold threshold --outputdir output_dir_path";
    parser = optparse.OptionParser(usage = usage);
    parser.add_option("-f", "--files", type="string",
                      action="store", dest="files", help="comma separated file paths");
    parser.add_option("-n", "--names", type="string",
                      action="store", dest="names", help="comma separated names associated to the files specified in --files");
    parser.add_option("-s", "--sequences", type="string",
                      action="store", dest="sequences", help="contains a list of sequences (one for each row)");
    parser.add_option("-a", "--fasta", type="string",
                      action="store", dest="fasta", help="contains the content of a fasta file");
    parser.add_option("-x", "--search", type="string", default=0,
                      action="store", dest="search", help="search mode");
    parser.add_option("-e", "--exact", type="int", default=0,
                      action="store", dest="exact", help="exact algorithm (required only if search is 1)");
    parser.add_option("-t", "--sthreshold", type="float",
                      action="store", dest="sthreshold", help="threshold applied to the search algorithm");
    parser.add_option("-o", "--outputdir", type="string",
                      action="store", dest="outputdir", help="output directory (collection) path");

    #parser.add_option("-k", "--outfile", type="string",
    #action="store", dest="outfile", help="output file");

    # TEST
    #--search 'rrr'
    #--sthreshold 0.5
    #--exact 0
    #--sequences 'id0__tc__CAATTAATGATAAATATTTTATAAGGTGCGGAAATAAAGTGAGGAATATCTTTTAAATTCAAGTTCAATTCTGAAAGC'
    #--outputdir 'collection_content'
    #sequences = 'id0__tc__CAATTAATGATAAATATTTTATAAGGTGCGGAAATAAAGTGAGGAATATCTTTTAAATTCAAGTTCAATTCTGAAAGC';
    #print(sequences);
    #(options, args) = parser.parse_args(['-x', 'rrr', '-t', 0.5, '-s', sequences, '-o', 'collection_content']);

    (options, args) = parser.parse_args();
    return query( options, args );

if __name__ == "__main__": __main__()
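
# Example invocations, shown as a sketch: the search options and the sequence
# match the TEST values above, while the file and collection names passed to
# --files, --names and --outputdir are illustrative only.
#
# python query.py --search 'rrr' --sthreshold 0.5 --exact 0 \
#     --sequences 'id0__tc__CAATTAATGATAAATATTTTATAAGGTGCGGAAATAAAGTGAGGAATATCTTTTAAATTCAAGTTCAATTCTGAAAGC' \
#     --outputdir 'collection_content'
#
# python query.py --search 'rrr' --sthreshold 0.5 --exact 0 \
#     --files 'queries_0.txt,queries_1.txt' --names 'queries_0,queries_1' \
#     --outputdir 'collection_content'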