#!/usr/bin/python

import sys
import shutil
import re
import urllib2
import subprocess
import gzip
import os
import tempfile
from optparse import OptionParser
from galaxy.util.json import from_json_string, to_json_string

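# Command line: the tool directory (containing ALFA.py) is expected as the first
# positional argument; -e/--ensembl selects the kingdom and species to fetch from
# Ensembl, and -o/--output points to the JSON file exchanged with Galaxy.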
def get_arg():
    parser = OptionParser()
    parser.add_option("-e", "--ensembl", dest='ensembl_info', action="store", nargs=2, metavar=("kingdom", "species_name"), type="str")
    parser.add_option("-o", "--output", dest='output_filename', action="store", nargs=1, metavar='JSON_FILE')
    parser.add_option("--log", dest='log_filename', action="store", nargs=1, metavar='log_report')
    (options, args) = parser.parse_args()
    return options, args

def cleanup_before_exit(tmp_dir):
    if tmp_dir and os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)

def get_page_content(url):
    req = urllib2.Request(url)
    page = urllib2.urlopen(req)
    return page.read()


def download_file(link, local_file_name):
    req = urllib2.Request(link)
    src_file = urllib2.urlopen(req)
    with open(local_file_name, 'wb') as local_file:
        local_file.write(src_file.read())

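# Decompress a gzip archive to a plain file (the downloaded GTF comes gzipped).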
def uncompress_gz(gz_file_name, uncompressed_file_name):
    print("____________________________________________________________")
    print("*** Uncompressing %s" % gz_file_name)
    uncompressed_file = open(uncompressed_file_name, 'wb')
    with gzip.open(gz_file_name, 'rb') as src_file:
        uncompressed_file.write(src_file.read())
    uncompressed_file.close()
    print("-> Uncompressed !\n")

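# Register the new entry under data_tables/alfa_indexes in the dictionary that
# is written back to Galaxy as JSON at the end of the run.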
def add_data_table_entry(data_manager_dict, data_table_entry):
    data_manager_dict['data_tables'] = data_manager_dict.get('data_tables', {})
    data_manager_dict['data_tables']['alfa_indexes'] = data_manager_dict['data_tables'].get('alfa_indexes', data_table_entry)
    return data_manager_dict

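# Normalise a user-supplied species name to the lowercase, underscore-separated
# form used in Ensembl ftp paths (e.g. 'Homo sapiens' becomes 'homo_sapiens').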
def standardize_species_name(species_name):
    standard_species_name = re.sub(r'[)]$', '', species_name)
    standard_species_name = re.sub(r'[ _),-.(=]+ *', '_', standard_species_name)
    return standard_species_name.lower()

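# Vertebrates are hosted on ftp.ensembl.org; the other kingdoms are served from
# ftp.ensemblgenomes.org under their own subdirectory.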
def get_ensembl_url_root(kingdom):
    print("____________________________________________________________")
    print("*** Determining Ensembl ftp root url")
    if kingdom == 'vertebrates':
        root = 'ftp://ftp.ensembl.org/pub/current_gtf/'
    else:
        root = 'ftp://ftp.ensemblgenomes.org/pub/%s/current/' % kingdom
    print("-> Determined !\n")
    return root

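# Check that the species is listed in the Ensembl species file for the kingdom.
# Returns the (possibly completed) species name together with its matching line,
# or exits if the name cannot be resolved to a single species.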
def test_ensembl_species_exists(kingdom, url, species_name):
    print("____________________________________________________________")
    print("*** Testing whether %s is referenced in Ensembl %s" % (species_name, kingdom))
    list_species_file_name = 'species_Ensembl%s%s.txt' % (kingdom[0].upper(), kingdom[1:])
    print("%s" % kingdom)
    if kingdom == 'vertebrates':
        download_file(url, list_species_file_name)
    else:
        download_file(url + list_species_file_name, list_species_file_name)

    grep_result = subprocess.Popen(['grep', species_name, list_species_file_name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    species_lines_matched, grep_error = grep_result.communicate()
    if grep_error is not None or species_lines_matched == "":
        msg = 'The species \'%s\' is not referenced on Ensembl (%s)' % (species_name, kingdom)
        sys.exit(msg)

    species_lines = species_lines_matched.split('\n')
    del species_lines[-1]
    nb_lines = len(species_lines)

    if nb_lines == 1:
        if kingdom == 'vertebrates':
            fields = species_lines[0].split(' ')
            columns = fields[-1].split('\r')
            found_species_name = columns[0]
        else:
            columns = species_lines[0].split('\t')
            found_species_name = columns[1]
        if species_name != found_species_name:
            print('-> \'%s\' has been replaced with the complete species name \'%s\'' % (species_name, found_species_name))
            return found_species_name, species_lines_matched
        print("-> Referenced !\n")
        return species_name, species_lines_matched
    else:
        list_species = [''] * nb_lines
        for i in range(0, nb_lines):
            if kingdom == 'vertebrates':
                fields = species_lines[i].split(' ')
                columns = fields[-1].split('\r')
                list_species[i] = columns[0]
            else:
                columns = species_lines[i].split('\t')
                list_species[i] = columns[1]
            exact_match = re.search('^%s$' % species_name, list_species[i])
            if exact_match:
                print("-> Referenced !\n")
                return species_name, species_lines[i]
        msg = 'The string \'%s\' has been matched against the list of Ensembl species but is not a complete species name.\nPlease retry with one of the following species names:\n%s' % (species_name, list_species)
        sys.exit(msg)

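# Bacteria, protists and fungi are grouped into '<kingdom>_..._collection'
# subdirectories on the ftp server; extract that collection name from the
# species line when present.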
def get_ensembl_collection(kingdom, species_line):
    print("*** Extracting the %s_collection of the species" % kingdom)
    collection_regex = re.compile('%s_.+_collection' % kingdom.lower())
    collection_match = re.search(collection_regex, species_line)
    if not collection_match:
        print("-> Skipped: this species is not classified in an Ensembl %s collection\n" % kingdom)
        return None
    print("-> Extracted !\n")
    return collection_match.group(0)

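# Look up the '<Species>.<assembly>.<release>.gtf.gz' archive name in the ftp
# directory listing of the species.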
def get_ensembl_gtf_archive_name(url_dir, species_name):
    print("____________________________________________________________")
    print("*** Extracting the gtf archive name of %s" % species_name)
    gtf_archive_regex = re.compile(r'%s\..*\.[0-9]+\.gtf\.gz' % species_name, flags=re.IGNORECASE)
    dir_content = get_page_content(url_dir)
    gtf_archive_match = re.search(gtf_archive_regex, dir_content)
    if not gtf_archive_match:
        sys.exit('The species is referenced on Ensembl but a nomenclature error prevented the gtf archive from being found')
    gtf_archive_name = gtf_archive_match.group(0)
    print("-> Extracted !\n")
    return gtf_archive_name

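# Build the full download URL (adding the gtf/ and collection subdirectories
# where needed) and fetch the GTF archive for the species.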
def get_ensembl_gtf_archive(kingdom, url, species_name, species_line):
    if kingdom != 'vertebrates':
        url = url + 'gtf/'
        if kingdom == 'bacteria' or kingdom == 'protists' or kingdom == 'fungi':
            collection = get_ensembl_collection(kingdom, species_line)
            if collection is not None:
                url = url + "%s/" % collection
    final_url = url + species_name + '/'
    gtf_archive_name = get_ensembl_gtf_archive_name(final_url, species_name)
    print("____________________________________________________________")
    print("*** Downloading the gtf archive of %s" % species_name)
    download_file(final_url + gtf_archive_name, gtf_archive_name)
    print("-> Downloaded !\n")
    return gtf_archive_name

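# Run 'python ALFA.py -a <gtf>'; the rest of the script expects ALFA to write
# the '<prefix>.stranded.index' and '<prefix>.unstranded.index' files in the
# current working directory.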
def generate_alfa_indexes(path_to_alfa, gtf_file_name):
    print("____________________________________________________________")
    print("*** Generating alfa indexes from %s" % gtf_file_name)
    alfa_result = subprocess.Popen(['python', path_to_alfa, '-a', gtf_file_name], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    alfa_out, alfa_err = alfa_result.communicate()
    if alfa_err is not None and not re.search('### End of program', alfa_err):
        msg = 'Generation failed due to an ALFA error: %s' % alfa_err
        sys.exit(msg)
    print("ALFA output:\n%s" % alfa_out)
    print("-> Generated !\n")

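# Derive the data table fields (value, dbkey, name, prefix, ...) from the archive
# name, assumed to follow the '<Species>.<assembly>.<release>.gtf.gz' pattern.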
def get_data_table_new_entry(gtf_archive_name):
    info_list = gtf_archive_name.split('.')
    species = info_list[0]
    version = info_list[1]
    release = info_list[2]
    value = '%s_%s_%s' % (species, version, release)
    dbkey = value
    name = '%s: %s (release %s)' % (species, version, release)
    prefix = '%s.%s.%s' % (species, version, release)
    entry_dict = {'species': species, 'version': version, 'release': release, 'value': value, 'dbkey': dbkey, 'name': name, 'prefix': prefix}
    return entry_dict

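# Main flow: read the Galaxy JSON parameter file, work in a temporary directory,
# download and index the requested annotation, copy the indexes into the
# dataset's extra_files_path and report the new data table entry back to Galaxy.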
def main():
    options, args = get_arg()
    tool_dir = args[0]

    path_to_alfa = os.path.join(tool_dir, 'ALFA.py')

    if options.output_filename is None:
        msg = 'No json output file specified'
        sys.exit(msg)
    output_filename = options.output_filename
    params = from_json_string(open(output_filename).read())
    target_directory = params['output_data'][0]['extra_files_path']
    os.mkdir(target_directory)

    tmp_dir = tempfile.mkdtemp(prefix='tmp', suffix='')
    os.chdir(tmp_dir)

    data_manager_dict = {}

    if options.ensembl_info:
        kingdom, species_name = options.ensembl_info
        species_name = standardize_species_name(species_name)
        url = get_ensembl_url_root(kingdom)
        species_name, species_line = test_ensembl_species_exists(kingdom, url, species_name)
        gtf_archive_name = get_ensembl_gtf_archive(kingdom, url, species_name, species_line)
        data_table_entry = get_data_table_new_entry(gtf_archive_name)
        gtf_file_name = '%s.gtf' % data_table_entry['prefix']
        uncompress_gz(gtf_archive_name, gtf_file_name)
        generate_alfa_indexes(path_to_alfa, gtf_file_name)
        stranded_index_name = '%s.stranded.index' % data_table_entry['prefix']
        unstranded_index_name = '%s.unstranded.index' % data_table_entry['prefix']
        add_data_table_entry(data_manager_dict, data_table_entry)

    print("____________________________________________________________")
    print("*** General Info")
    print("TMP DIR:\t%s" % tmp_dir)
    print("TARGET DIR:\t%s" % target_directory)
    print("URL ROOT:\t%s" % url)
    print("SPECIES:\t%s" % data_table_entry['species'])
    print("VERSION:\t%s" % data_table_entry['version'])
    print("RELEASE:\t%s" % data_table_entry['release'])
    print("VALUE:\t%s" % data_table_entry['value'])
    print("DBKEY:\t%s" % data_table_entry['dbkey'])
    print("NAME:\t%s" % data_table_entry['name'])
    print("PREFIX:\t%s" % data_table_entry['prefix'])
    print("____________________________________________________________")
    print("*** Initial dictionary")
    print("%s" % params)

    # Copy the generated index files from the temporary directory into the
    # dataset's extra files path.
    shutil.copyfile(stranded_index_name, os.path.join(target_directory, stranded_index_name))
    shutil.copyfile(unstranded_index_name, os.path.join(target_directory, unstranded_index_name))

    cleanup_before_exit(tmp_dir)

    # Write the data table entry back to Galaxy as JSON.
    open(output_filename, 'wb').write(to_json_string(data_manager_dict))


if __name__ == "__main__":
    main()