0
|
1 #!/usr/bin/env python3
|
22
|
2 import sys
|
0
|
3 import time
|
|
4 import configuration
|
|
5 import os
|
|
6 import textwrap
|
|
7 import subprocess
|
|
8 from tempfile import NamedTemporaryFile
|
|
9 from collections import defaultdict
|
|
10
|
|
11
|
|
class Range:
    '''
    Inclusive float interval used as an argparse ``choices`` entry.

    argparse validates a value with ``value in choices``, i.e. it tests
    ``value == choice`` — so overriding ``__eq__`` makes one Range instance
    accept every float inside [start, end].
    '''

    def __init__(self, start, end):
        # Inclusive lower/upper bounds of the accepted interval.
        self.start = start
        self.end = end

    def __eq__(self, other):
        # Invoked by argparse's membership test; treat equality as
        # "falls within the closed interval".
        return self.start <= other <= self.end

    def __str__(self):
        return "float range {}..{}".format(self.start, self.end)

    # Same human-readable form for error messages and debugging output
    # (avoids duplicating the format string).
    __repr__ = __str__
|
|
29
|
|
30
|
|
def check_file_start(gff_file):
    '''Return how many comment ("#"-prefixed) lines open the given GFF file.'''
    count_comment = 0
    with open(gff_file, "r") as handle:
        for line in handle:
            # The header ends at the first non-comment line.
            if not line.startswith("#"):
                break
            count_comment += 1
    return count_comment
|
|
39
|
|
40
|
|
def write_info(filt_dom_tmp, FILT_DOM_GFF, orig_class_dict, filt_class_dict,
               dom_dict, version_lines, TH_IDENTITY, TH_SIMILARITY,
               TH_LENGTH, TH_INTERRUPT, TH_LEN_RATIO, SELECTED_DOM):
    '''
    Write domain statistics as "##" comment lines at the beginning of the
    filtered GFF, then append the already-filtered records.

    filt_dom_tmp    -- temp file object (only its .name is read) holding the
                       filtered GFF records
    FILT_DOM_GFF    -- path of the final filtered GFF file to create
    orig_class_dict -- per-classification domain counts before filtering
    filt_class_dict -- per-classification domain counts after filtering
    dom_dict        -- nested mapping: sequence id -> domain type -> count
    version_lines   -- original header ("#...") lines carried over verbatim
    remaining args  -- filtering thresholds, echoed into the header
    '''
    with open(FILT_DOM_GFF, "w") as filt_gff:
        # Preserve the header lines of the unfiltered GFF.
        for line in version_lines:
            filt_gff.write(line)
        # Echo the thresholds used (typos "thresholdss", "alingment",
        # "interuptions" fixed here).
        filt_gff.write(("##Filtering thresholds: min identity: {}, min similarity: {},"
                        " min relative alignment length: {}, max interruptions(stop or "
                        "frameshift): {}, max relative alignment length: {}, selected"
                        " domains: {} \n").format(TH_IDENTITY,
                                                  TH_SIMILARITY,
                                                  TH_LENGTH,
                                                  TH_INTERRUPT,
                                                  TH_LEN_RATIO,
                                                  SELECTED_DOM))
        # Classification counts before and after filtering.
        filt_gff.write("##CLASSIFICATION\tORIGINAL_COUNTS\tFILTERED_COUNTS\n")
        if not orig_class_dict:
            filt_gff.write("##NO DOMAINS CLASSIFICATIONS\n")
        for classification in sorted(orig_class_dict.keys()):
            # Classifications removed entirely by filtering get a zero count.
            filt_gff.write("##{}\t{}\t{}\n".format(
                classification, orig_class_dict[classification],
                filt_class_dict.get(classification, 0)))
        filt_gff.write("##-----------------------------------------------\n"
                       "##SEQ\tDOMAIN\tCOUNTS\n")
        # Per-sequence, per-domain counts of the retained records.
        if not dom_dict:
            filt_gff.write("##NO DOMAINS\n")
        for seq in sorted(dom_dict.keys()):
            for dom, count in sorted(dom_dict[seq].items()):
                filt_gff.write("##{}\t{}\t{}\n".format(seq, dom, count))
        filt_gff.write("##-----------------------------------------------\n")
        # Finally append the filtered GFF records themselves.
        with open(filt_dom_tmp.name, "r") as filt_tmp:
            for line in filt_tmp:
                filt_gff.write(line)
|
|
81
|
|
82
|
|
def get_file_start(gff_file):
    '''Collect the leading comment lines of a GFF file.

    Returns a pair: (number of header lines, list of those lines verbatim).
    '''
    header_lines = []
    with open(gff_file, "r") as handle:
        for line in handle:
            # Stop at the first record line; everything before is header.
            if not line.startswith("#"):
                break
            header_lines.append(line)
    return len(header_lines), header_lines
|
|
93
|
|
94
|
15
|
def parse_gff_line(line):
    '''Return a dict of the nine fixed GFF3 fields with parsed attributes.

    The 'attributes' entry is replaced by a dict mapping attribute names to
    their values.

    Note - all field values stay strings, and the trailing newline of the
    input line is retained on the last attribute's value (callers compare /
    convert accordingly).
    '''
    # order of first 9 columns is fixed by the GFF3 specification
    gff_line = dict(
        zip(
            ['seqid', 'source', 'type', 'start', 'end',
             'score', 'strand', 'phase', 'attributes'],
            line.split("\t")
        )
    )
    # Split "key=value" pairs; maxsplit=1 keeps values that themselves
    # contain '=' intact (a plain split crashed on such values).
    gff_line['attributes'] = dict(
        item.split("=", 1) for item in gff_line['attributes'].split(";")
    )
    return gff_line
|
|
110
|
0
|
def filter_qual_dom(DOM_GFF, FILT_DOM_GFF, TH_IDENTITY, TH_SIMILARITY,
                    TH_LENGTH, TH_INTERRUPT, TH_LEN_RATIO, SELECTED_DOM,
                    ELEMENT):
    ''' Filter GFF output based on domain type and quality of alignment.

    Records classified as ambiguous (configuration.AMBIGUOUS_TAG) are dropped
    unconditionally; the rest must pass all quality thresholds, match the
    selected domain type ("All" accepts any) and contain ELEMENT as a
    substring of their classification.

    Writes the filtered GFF (with a statistics header) to FILT_DOM_GFF and
    returns per-sequence coordinate lists:
    (xminimals_all, xmaximals_all, domains_all, seq_ids_all).
    '''
    [count_comment, version_lines] = get_file_start(DOM_GFF)
    filt_dom_tmp = NamedTemporaryFile(delete=False)
    # Records are written through a separate text-mode handle below; close
    # the binary handle right away so it is not leaked (and so the file can
    # be reopened by name on all platforms).
    filt_dom_tmp.close()
    with open(DOM_GFF, "r") as gff_all, open(filt_dom_tmp.name,
                                             "w") as gff_filtered:
        # Skip the header lines; they were already captured above.
        for _ in range(count_comment):
            next(gff_all)
        dom_dict = defaultdict(lambda: defaultdict(int))
        orig_class_dict = defaultdict(int)
        filt_class_dict = defaultdict(int)
        seq_ids_all = []
        xminimals = []
        xmaximals = []
        domains = []
        xminimals_all = []
        xmaximals_all = []
        domains_all = []
        start = True
        for line in gff_all:
            gff_line = parse_gff_line(line)
            classification = gff_line['attributes']['Final_Classification']
            orig_class_dict[classification] += 1
            ## ambiguous domains filtered out automatically
            if classification != configuration.AMBIGUOUS_TAG:
                al_identity = float(gff_line['attributes']['Identity'])
                al_similarity = float(gff_line['attributes']['Similarity'])
                al_length = float(gff_line['attributes']['Relat_Length'])
                relat_interrupt = float(
                    gff_line['attributes']['Relat_Interruptions'])
                db_len_proportion = float(
                    gff_line['attributes']['Hit_to_DB_Length'])
                dom_type = gff_line['attributes']['Name']
                seq_id = gff_line['seqid']
                xminimal = int(gff_line['start'])
                xmaximal = int(gff_line['end'])
                c1 = al_identity >= TH_IDENTITY
                c2 = al_similarity >= TH_SIMILARITY
                if (c1 and c2 and al_length >= TH_LENGTH
                        and relat_interrupt <= TH_INTERRUPT
                        and db_len_proportion <= TH_LEN_RATIO
                        and (dom_type == SELECTED_DOM or SELECTED_DOM == "All")
                        and (ELEMENT in classification)):
                    # Record passes every filter: keep it and update counts.
                    gff_filtered.write(line)
                    filt_class_dict[classification] += 1
                    dom_dict[seq_id][dom_type] += 1
                    if start:
                        # Seed the sequence list with the first passing record.
                        seq_ids_all.append(seq_id)
                        start = False
                    if seq_id != seq_ids_all[-1]:
                        # New sequence encountered: flush the per-sequence
                        # coordinate/domain lists and start fresh ones.
                        seq_ids_all.append(seq_id)
                        xminimals_all.append(xminimals)
                        xmaximals_all.append(xmaximals)
                        domains_all.append(domains)
                        xminimals = []
                        xmaximals = []
                        domains = []
                    xminimals.append(xminimal)
                    xmaximals.append(xmaximal)
                    domains.append(dom_type)
    # Prepend the statistics header and the filtered records to the final GFF.
    write_info(filt_dom_tmp, FILT_DOM_GFF, orig_class_dict, filt_class_dict,
               dom_dict, version_lines, TH_IDENTITY, TH_SIMILARITY,
               TH_LENGTH, TH_INTERRUPT, TH_LEN_RATIO, SELECTED_DOM)
    os.unlink(filt_dom_tmp.name)
    # Flush the lists of the last processed sequence.
    xminimals_all.append(xminimals)
    xmaximals_all.append(xmaximals)
    domains_all.append(domains)
    return xminimals_all, xmaximals_all, domains_all, seq_ids_all
|
|
179
|
|
180
|
|
def get_domains_protseq(FILT_DOM_GFF, DOMAIN_PROT_SEQ):
    ''' Get the translated protein sequence of original DNA seq for all the filtered domains regions

    The translated sequences are taken from the alignment reported by LASTAL
    (Query_Seq attribute in the GFF) and written as FASTA to DOMAIN_PROT_SEQ.
    '''
    count_comment = check_file_start(FILT_DOM_GFF)
    with open(FILT_DOM_GFF, "r") as filt_gff:
        # Skip the statistics/header comment lines.
        for _ in range(count_comment):
            next(filt_gff)
        with open(DOMAIN_PROT_SEQ, "w") as dom_prot_file:
            for line in filt_gff:
                fields = line.rstrip().split("\t")
                seq_id = fields[0]
                # Attributes are parsed positionally (fixed order written by
                # the upstream pipeline).
                attr_parts = fields[8].split(";")
                dom = attr_parts[0].split("=")[1]
                dom_class = attr_parts[1].split("=")[1]
                # Coordinates live after the last ':' and before any '['.
                positions = attr_parts[3].split("=")[1].split(":")[-1].split("[")[0]
                prot_seq_align = attr_parts[6].split("=")[1]
                # Strip alignment artefacts: frameshift marks and gap dashes.
                prot_seq = prot_seq_align.translate({ord(ch): None
                                                     for ch in '/\\-'})
                fasta_header = ">{}:{} {} {}".format(seq_id, positions, dom,
                                                     dom_class)
                dom_prot_file.write("{}\n{}\n".format(
                    fasta_header, textwrap.fill(prot_seq,
                                                configuration.FASTA_LINE)))
|
|
206
|
|
207
|
|
def main(args):
    '''Resolve CLI arguments and output paths, run the quality/domain
    filtering and export translated protein sequences of retained domains.'''

    t = time.time()

    DOM_GFF = args.dom_gff
    DOMAIN_PROT_SEQ = args.domains_prot_seq
    TH_IDENTITY = args.th_identity
    TH_LENGTH = args.th_length
    TH_INTERRUPT = args.interruptions
    TH_SIMILARITY = args.th_similarity
    TH_LEN_RATIO = args.max_len_proportion
    FILT_DOM_GFF = args.domains_filtered
    SELECTED_DOM = args.selected_dom
    OUTPUT_DIR = args.output_dir
    ELEMENT = args.element_type

    # Fall back to the configured default output names when not given.
    if DOMAIN_PROT_SEQ is None:
        DOMAIN_PROT_SEQ = configuration.DOM_PROT_SEQ
    if FILT_DOM_GFF is None:
        FILT_DOM_GFF = configuration.FILT_DOM_GFF

    if OUTPUT_DIR and not os.path.exists(OUTPUT_DIR):
        os.makedirs(OUTPUT_DIR)

    # Relative output paths are placed next to the input GFF unless an
    # explicit output directory was requested.
    if not os.path.isabs(FILT_DOM_GFF):
        if OUTPUT_DIR is None:
            OUTPUT_DIR = os.path.dirname(os.path.abspath(DOM_GFF))
        FILT_DOM_GFF = os.path.join(OUTPUT_DIR, os.path.basename(FILT_DOM_GFF))
        DOMAIN_PROT_SEQ = os.path.join(OUTPUT_DIR,
                                       os.path.basename(DOMAIN_PROT_SEQ))

    xminimals_all, xmaximals_all, domains_all, seq_ids_all = filter_qual_dom(
        DOM_GFF, FILT_DOM_GFF, TH_IDENTITY, TH_SIMILARITY, TH_LENGTH,
        TH_INTERRUPT, TH_LEN_RATIO, SELECTED_DOM, ELEMENT)
    get_domains_protseq(FILT_DOM_GFF, DOMAIN_PROT_SEQ)

    print("ELAPSED_TIME_DOMAINS = {} s".format(time.time() - t))
|
|
246
|
|
247
|
|
if __name__ == "__main__":
    # CLI entry point: build the argument parser and dispatch to main().
    import argparse
    from argparse import RawDescriptionHelpFormatter

    # Combine two argparse formatters: show default values in --help AND
    # keep the raw layout of the long description below.
    class CustomFormatter(argparse.ArgumentDefaultsHelpFormatter,
                          argparse.RawDescriptionHelpFormatter):
        pass

    parser = argparse.ArgumentParser(
        description=
        '''The script performs DANTE's output filtering for quality and/or extracting specific type of protein domain or mobile elements of origin. For the filtered domains it reports their translated protein sequence of original DNA.
WHEN NO PARAMETERS GIVEN, IT PERFORMS QUALITY FILTERING USING THE DEFAULT PARAMETRES (optimized for Viridiplantae species)

INPUTS:
- GFF3 file produced by protein_domains.py OR already filtered GFF3

FILTERING OPTIONS:
> QUALITY: - Min relative length of alignemnt to the protein domain from DB (without gaps)
- Identity
- Similarity (scoring matrix: BLOSUM82)
- Interruption in the reading frame (frameshifts + stop codons) per every starting 100 AA
- Max alignment proportion to the original length of database domain sequence
> DOMAIN TYPE: choose from choices ('Name' attribute in GFF)
Records for ambiguous domain type (e.g. INT/RH) are filtered out automatically

> MOBILE ELEMENT TYPE:
arbitrary substring of the element classification ('Final_Classification' attribute in GFF)

OUTPUTS:
- filtered GFF3 file
- fasta file of translated protein sequences (from original DNA) for the aligned domains that match the filtering criteria

DEPENDENCIES:
- python 3.4 or higher
> ProfRep modules:
- configuration.py

EXAMPLE OF USAGE:
Getting quality filtered integrase(INT) domains of all gypsy transposable elements:
./domains_filtering.py -dom_gff PATH_TO_INPUT_GFF -pdb PATH_TO_PROTEIN_DB -cs PATH_TO_CLASSIFICATION_FILE --selected_dom INT --element_type Ty3/gypsy

''',
        epilog="""""",
        formatter_class=CustomFormatter)
    # Input GFF is the only mandatory argument.
    requiredNamed = parser.add_argument_group('required named arguments')
    requiredNamed.add_argument("-dg",
                               "--dom_gff",
                               type=str,
                               required=True,
                               help="basic unfiltered gff file of all domains")
    # Output destinations (defaults resolved in main() from configuration).
    parser.add_argument("-ouf",
                        "--domains_filtered",
                        type=str,
                        help="output filtered domains gff file")
    parser.add_argument("-dps",
                        "--domains_prot_seq",
                        type=str,
                        help="output file containg domains protein sequences")
    # Quality thresholds; Range instances make argparse accept any float
    # within the closed interval (via Range.__eq__).
    parser.add_argument("-thl",
                        "--th_length",
                        type=float,
                        choices=[Range(0.0, 1.0)],
                        default=0.8,
                        help="proportion of alignment length threshold")
    parser.add_argument("-thi",
                        "--th_identity",
                        type=float,
                        choices=[Range(0.0, 1.0)],
                        default=0.35,
                        help="proportion of alignment identity threshold")
    parser.add_argument("-ths",
                        "--th_similarity",
                        type=float,
                        choices=[Range(0.0, 1.0)],
                        default=0.45,
                        help="threshold for alignment proportional similarity")
    parser.add_argument(
        "-ir",
        "--interruptions",
        type=int,
        default=3,
        help=
        "interruptions (frameshifts + stop codons) tolerance threshold per 100 AA")
    parser.add_argument(
        "-mlen",
        "--max_len_proportion",
        type=float,
        default=1.2,
        help=
        "maximal proportion of alignment length to the original length of protein domain from database")
    # Domain / element selection filters.
    parser.add_argument(
        "-sd",
        "--selected_dom",
        type=str,
        default="All",
        choices=[
            "All", "GAG", "INT", "PROT", "RH", "RT", "aRH", "CHDCR", "CHDII",
            "TPase", "YR", "HEL1", "HEL2", "ENDO"
        ],
        help="filter output domains based on the domain type")
    parser.add_argument(
        "-el",
        "--element_type",
        type=str,
        default="",
        help="filter output domains by typing substring from classification")
    parser.add_argument(
        "-dir",
        "--output_dir",
        type=str,
        default=None,
        help="specify if you want to change the output directory")
    args = parser.parse_args()
    main(args)
|