comparison MolD_v1.4.py @ 0:4e8e2f836d0f draft default tip

planemo upload commit 232ce39054ce38be27c436a4cabec2800e14f988-dirty
author itaxotools
date Sun, 29 Jan 2023 16:25:48 +0000
1 """
2 This script compiles rDNC-based DNA diagnoses for pre-defined taxa in a dataset. This is the MAIN WORKING VERSION v1.4
3 This version implements the following new functionalities:
4 - automatic trimming of the alignment to match the median sequence length (when the sequence length distribution and the NumberN setting require it)
5 - expanded qCLADEs setting
6 - diagnoses in pairwise comparisons of taxa
7 - selection of a reference sequence for indexing DNCs
8
9 THIS version LACKS SPART compatibility
10
11 """
12 import os, sys
13 import random
14 import argparse
15 from io import StringIO
16 ######################################################################## FUNCTIONS
17 #***STEP 1 - SORTING ENTRIES BY CLADE AND IDENTIFYING NUCLEOTIDE POSITIONS SHARED WITHIN CLADE
18 def Step1(raw_records):
19 Clades=[]
20 for i in range(len(raw_records)):
21 Clade=raw_records[i][1]
22 if Clade not in Clades:
23 Clades.append(Clade)
24 clade_sorted_seqs = {}
25 for letter in Clades:
26 clade_sorted_seqs[letter]=[]
27 for i in range(len(raw_records)):
28 if raw_records[i][1]==letter:
29 clade_sorted_seqs[letter].append(raw_records[i][2])
30 shared_positions={}
31 for key in clade_sorted_seqs:
32 sh_pos=[]
33 for i in range(len(clade_sorted_seqs[key][0])):
34 shared_nucleotide = True
35 csm = clade_sorted_seqs[key][0][i] #candidate shared nucleotide
36 for j in range(1, len(clade_sorted_seqs[key])):
37 if clade_sorted_seqs[key][j][i] != csm:
38 shared_nucleotide = False
39 break
40 if shared_nucleotide == True and csm != 'N':
41 sh_pos.append(i)
42 shared_positions[key]=sh_pos
43 return Clades, clade_sorted_seqs, shared_positions
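# Editorial example (a minimal made-up dataset) of what Step1 returns:
#   raw_records = [['id1', 'TaxA', 'ACGT'], ['id2', 'TaxA', 'ACGA'], ['id3', 'TaxB', 'TCGT']]
#   Clades            -> ['TaxA', 'TaxB']
#   clade_sorted_seqs -> {'TaxA': ['ACGT', 'ACGA'], 'TaxB': ['TCGT']}
#   shared_positions  -> {'TaxA': [0, 1, 2], 'TaxB': [0, 1, 2, 3]}
# i.e. a position is listed for a clade if all of its sequences carry the same non-'N' base there.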
44
45 #***STEP 2 COMPILING COMPARISON LISTS FOR CLADES AND IDENTIFYING VARIABLE POSITIONS AND N PRIORITY POSITIONS WITH LARGEST CUTOFFS
46 def C_VP_PP(clade_sorted_seqs, clade, shared_positions, CUTOFF):# complist_variable_positions_priority_positions; Arguments: dictionary, string, dictionary, cutoff (string such as '>0' or an integer-like value)
47 CShN={}#a dictionary keys - clade shared positions, values - nucleotides at those positions
48 for pos in shared_positions[clade]:
49 CShN[pos] = clade_sorted_seqs[clade][0][pos]#creates a dictionary shared position : nucleotide
50 complist=[]
51 for key in clade_sorted_seqs:
52 if key != clade:
53 complist = complist + clade_sorted_seqs[key]#creates a list of all other sequences for comparison
54 cutoffs = {}
55 pures = []####! newline
56 for key in CShN:
57 newcomplist = []
58 for k in complist:
59 if k[key] == CShN[key]:
60 newcomplist.append(k)
61 else: continue
62 cutoffs[key] = len(complist) - len(newcomplist)
63 if len(newcomplist) == 0:####! newline
64 pures.append(key)####! newline
65 CPP = []
66 for key in sorted(cutoffs, key = cutoffs.get, reverse = True):
67 CPP.append(key)
68 if CUTOFF[0] == '>':#VERYNEW
69 Clade_priority_positions = {pos:CShN[pos] for pos in CPP if cutoffs[pos] > int(CUTOFF[1:])}#VERYNEW
70 else:#VERYNEW
71 Clade_priority_positions = {}
72 for position in CPP[:int(CUTOFF)]:#Here you define how many of the clade shared combinations are used in subsequent search
73 Clade_priority_positions[position] = CShN[position]
74 return complist, Clade_priority_positions, cutoffs, pures####! pures added
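# Editorial note (hedged): CUTOFF is either a string such as '>0' (keep shared positions at which
# more than that many comparison sequences differ) or an integer-like value such as 100 (keep the
# top-ranked shared positions, largest cutoffs first). 'pures' are positions that are diagnostic on
# their own, i.e. no comparison sequence shares the clade's base there. With the toy records above,
# C_VP_PP(clade_sorted_seqs, 'TaxA', shared_positions, '>0') yields pures == [0] and
# Clade_priority_positions == {0: 'A'}.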
75
76 #***STEP 3 RANDOM SEARCH ACROSS PRIORITY POSITIONS TO FIND RAW DIAGNOSTIC COMBINATIONS AND TO SUBSEQUENTLY REFINE THEM
77 def random_position(somelist, checklist):#returns a randomly chosen element of somelist that is not already present in checklist
78 while True:
79 i = random.randint(0, len(somelist) - 1)
80 if somelist[i] not in checklist:
81 return somelist[i]
82 break
83 else:
84 continue
85
86 def step_reduction_complist(clade, complist, CPP, checked_ind):#checks randomly selected positions of CladeSharedNucleotides with sequences of other clades, until a diagnostic combination of nucleotides for a selected clade is found.
87 if len(complist) == 0:
88 return checked_ind
89 elif len(checked_ind) == len(CPP):
90 return checked_ind
91 else:
92 newcomplist = []
93 pos = random_position(list(CPP.keys()), checked_ind)
94 for j in complist:
95 if j[pos] == CPP[pos] or j[pos] == 'N':#VERYNEW
96 newcomplist.append(j)
97 else: continue
98 new_checked_ind = checked_ind + [pos]
99 return step_reduction_complist(clade, newcomplist, CPP, new_checked_ind)
100
101 def ConditionD(newcomb, complist, CPP):#The function checks 'Condition D' - i.e. whether a given combination of nucleotide positions is diagnostic for the selected clade
102 ContD = False
103 for i in newcomb:
104 newcomplist = []
105 for m in complist:
106 if m[i] == CPP[i]:
107 newcomplist.append(m)
108 else: continue
109 complist = newcomplist
110 if len(complist) == 0:
111 ContD = True
112 return ContD
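# Editorial example (made-up values): ConditionD is True when no comparison sequence matches the
# clade bases at all positions of the combination simultaneously.
#   CPP = {0: 'A', 2: 'G'}
#   ConditionD([0, 2], ['ATGC', 'CTGA'], CPP) -> False  # 'ATGC' matches at both sites
#   ConditionD([0, 2], ['CTGA', 'TTTT'], CPP) -> True   # nothing matches at site 0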
113
114 def RemoveRedundantPositions(raw_comb, complist, CPP):# The function removes positions from the raw combination one by one and checks whether the reduced combination still fulfills condition D, thus recursively shortening the diagnostic combination.
115 red_possible = False
116 for j in raw_comb:
117 newcomb = [k for k in raw_comb if k != j]
118 if ConditionD(newcomb, complist, CPP) == True:
119 red_possible = True
120 return RemoveRedundantPositions(newcomb, complist, CPP)
121 else: pass
122 if red_possible == False:
123 return raw_comb
124
125 #PUTS EVERYTHING TOGETHER - n1 ROUNDS OF RANDOM SEARCH FOLLOWED BY REFINEMENT OF THE RAW COMBINATIONS NO LONGER THAN maxlen1
126 def Diagnostic_combinations(qCLADE, complist, CPP, n1, maxlen1, maxlen2):
127 Achecked_ind = []
128 bestlists = []
129 n = n1
130 while n>0:#STEP3 proposes raw diagnostic combinations
131 raw_comb = step_reduction_complist(qCLADE, complist, CPP, Achecked_ind)
132 if len(raw_comb) <= maxlen1:
133 refined_comb = RemoveRedundantPositions(raw_comb, complist, CPP)
134 if len(refined_comb) <= maxlen2 and sorted(refined_comb) not in bestlists:
135 bestlists.append(sorted(refined_comb))
136 n=n-1
137 bestlists.sort(key=len)
138 return bestlists
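# Editorial note (hedged): n1 is the number of random-search rounds, maxlen1 the maximum length of
# a raw combination accepted for refinement, and maxlen2 the maximum length of a refined combination
# kept in bestlists. In mainprocessing() below this is called as
# Diagnostic_combinations(qCLADE, x, newy, N1, MaxLen1, MaxLen2), with N1=10000, MaxLen1=12 and
# MaxLen2=7 when these are not set in the parameters.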
139
140 #***STEP 4 ANALYSIS OF OUTPUT rDNCs
141 def IndependentKey(diagnostic_combinations):#PRESENTLY NOT INVOLVED - returns independent diagnostic nucleotide combinations, and identifies key nucleotide positions
142 independent_combinations = []
143 selected_positions = []
144 for i in range(len(diagnostic_combinations)):
145 if len(selected_positions) == 0:
146 for j in range(0, i):
147 if len(set(diagnostic_combinations[i]) & set(diagnostic_combinations[j])) == 0 and len(set(diagnostic_combinations[i]) & set(selected_positions)) == 0:
148 independent_combinations.append(diagnostic_combinations[i])
149 independent_combinations.append(diagnostic_combinations[j])
150 for k in range(len(diagnostic_combinations[i])):
151 selected_positions.append(diagnostic_combinations[i][k])
152 for l in range(len(diagnostic_combinations[j])):
153 selected_positions.append(diagnostic_combinations[j][l])
154 else:
155 if len(set(diagnostic_combinations[i]) & set(selected_positions)) == 0:
156 independent_combinations.append(diagnostic_combinations[i])
157 for k in range(len(diagnostic_combinations[i])):
158 selected_positions.append(diagnostic_combinations[i][k])
159 independent_combinations.sort(key=len)
160 key_positions = []
161 for pos in diagnostic_combinations[0]:
162 KP = True
163 for combination in diagnostic_combinations[1:]:
164 if pos not in combination:
165 KP = False
166 break
167 else: continue
168 if KP == True:
169 key_positions.append(pos)
170 return independent_combinations, key_positions
171
172 #SPECIFIC FUNCTIONS FOR THE rDNCs
173 def PositionArrays(Motifs):#VERYNEW ALL FUNCTION NEW
174 PositionArrays = []
175 VarPosList = []
176 for i in range(len(Motifs[0])):
177 Const = True
178 array = [Motifs[0][i]]
179 for j in range(1, len(Motifs)):#compare every remaining motif (range(len(Motifs[1:])) skipped the last one)
180 if Motifs[j][i] != 'N':
181 if Motifs[j][i] != array[-1]:
182 Const = False
183 array.append(Motifs[j][i])
184 PositionArrays.append(array)
185 if Const == False:
186 VarPosList.append(i)
187 return PositionArrays, VarPosList
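# Editorial example (hedged; assumes the loop above visits every motif after the first):
#   PositionArrays(['ACG', 'ACT', 'NCT']) puts position 2 into VarPosList, because both 'G' and 'T'
#   occur there, while positions 0 and 1 are constant once 'N' is ignored. PositionArrays[i] collects
#   the bases observed at site i and is later used by random_sequence_new() to draw replacement bases.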
188
189 def random_sequence_new(SEQ, PositionArrays, VarPosList, Pdiff):#VERYNEW FUNCTION REVISED
190 #print(["ROUND", len(SEQ)*Pdiff/100, round(len(SEQ)*Pdiff/100)])
191 n = round(len(SEQ)*Pdiff/100)
192 N = random.sample(list(range(1, n)), 1)[0]
193 PosToChange = random.sample([p for p in VarPosList if SEQ[p] != 'D'], N)#this is a new definition to keep alignment gaps unchanged
194 NEWSEQ = ''
195 for i in range(len(SEQ)):
196 if i not in PosToChange:
197 NEWSEQ = NEWSEQ + SEQ[i]
198 else:
199 newarray = [j for j in PositionArrays[i] if j != SEQ[i]]
200 newbase = random.sample(newarray, 1)[0]
201 NEWSEQ = NEWSEQ + newbase
202 return NEWSEQ
203
204 def GenerateBarcode_new(Diagnostic_combinations, length):#VERYNEW FUNCTION REVISED - Assembles a barcode (rDNC) of the desired length for a query taxon. Positions from the shortest DNCs are added first (ranked by frequency), then positions from DNCs one site longer, and then positions from the remaining longer DNCs
205 len1 = []
206 len2 = []
207 lenmore = []
208 for comb in Diagnostic_combinations:
209 if len(comb) == len(Diagnostic_combinations[0]):
210 for i in comb:
211 len1.append(i)
212 elif len(comb) == len(Diagnostic_combinations[0])+1:
213 for j in comb:
214 len2.append(j)
215 else:
216 for k in comb:
217 lenmore.append(k)
218 if len(Diagnostic_combinations[0]) == 1:
219 Setin = len1
220 else:
221 Setin = []
222 for pos in sorted(len1, key=len1.count, reverse = True):
223 if not pos in Setin:
224 Setin.append(pos)
225 for pos1 in sorted(len2, key=len2.count, reverse = True):
226 if not pos1 in Setin:
227 Setin.append(pos1)
228 for pos2 in sorted(lenmore, key=lenmore.count, reverse = True):
229 if not pos2 in Setin:
230 Setin.append(pos2)
231 return Setin[:length]
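# Editorial example (made-up combinations): with Diagnostic_combinations = [[3, 8], [3, 9], [4, 8, 9]]
# the shortest combinations have two sites, so their positions are ranked by frequency (3 twice,
# 8 and 9 once each), then positions from one-site-longer combinations are appended;
# GenerateBarcode_new(..., 3) would return [3, 8, 9].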
232
233 def Screwed_dataset_new(raw_records, nseq_per_clade_to_screw, PositionArrays, VarPosList, Percent_difference, Taxon, Cutoff):#VERYNEW FUNCTION REVISED
234 clades=[]
235 for i in range(len(raw_records)):
236 Clade=raw_records[i][1]
237 if Clade not in clades:
238 clades.append(Clade)
239 clade_sorted_seqs = {}
240 for letter in clades:
241 clade_sorted_seqs[letter]=[]
242 for i in range(len(raw_records)):
243 if raw_records[i][1]==letter:
244 clade_sorted_seqs[letter].append(raw_records[i][2])
245
246 for clade in clades:
247 seqlist = clade_sorted_seqs[clade]
248 newseqs = []
249 if len(seqlist) > nseq_per_clade_to_screw:
250 iSTS = random.sample(list(range(len(seqlist))), nseq_per_clade_to_screw)
251 for k in range(len(seqlist)):
252 if k in iSTS:
253 newseq = random_sequence_new(seqlist[k], PositionArrays, VarPosList, Percent_difference)
254 else:
255 newseq = seqlist[k]
256 newseqs.append(newseq)
257 elif len(clade_sorted_seqs[clade]) == nseq_per_clade_to_screw:
258 for k in range(len(seqlist)):
259 newseq = random_sequence_new(seqlist[k], PositionArrays, VarPosList, Percent_difference)
260 newseqs.append(newseq)
261 else:
262 for i in range(nseq_per_clade_to_screw):
263 seq = random.sample(seqlist, 1)[0]
264 newseq = random_sequence_new(seq, PositionArrays, VarPosList, Percent_difference)
265 newseqs.append(newseq)
266 clade_sorted_seqs[clade] = newseqs
267
268 shared_positions={}
269 for key in clade_sorted_seqs:
270 sh_pos=[]
271 for i in range(len(clade_sorted_seqs[key][0])):
272 shared_nucleotide = True
273 csm = clade_sorted_seqs[key][0][i] #candidate shared nucleotide
274 for j in range(1, len(clade_sorted_seqs[key])):
275 if clade_sorted_seqs[key][j][i] != csm:
276 shared_nucleotide = False
277 break
278 if shared_nucleotide == True and csm != 'N':
279 sh_pos.append(i)
280 shared_positions[key]=sh_pos
281
282 x,y,z,pures = C_VP_PP(clade_sorted_seqs, Taxon, shared_positions, Cutoff)#STEP2####
283 return x, y
284
285 #NEW FUNCTIONS
286 def medianSeqLen(listofseqs):#OCT2022
287 seqlens = [i.count('A')+i.count('C')+i.count('G')+i.count('T') for i in listofseqs]
288 medlen = sorted(seqlens)[int(len(seqlens)/2)]
289 medseq = listofseqs[seqlens.index(medlen)]
290 start = min([medseq.find('A'),medseq.find('C'),medseq.find('G'),medseq.find('T')])#fixed: find('T'), not count('T')
291 if not 'N' in medseq[start:]:
292 end = len(medseq)
293 else:
294 for i in range(start, len(medseq), 1):
295 if medseq[i:].count('N') == len(medseq[i:]):
296 end = i
297 break
298 return medlen, start, end
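# Editorial example (hedged; assumes the find('T') fix above): the median is taken over counts of
# determined bases (A/C/G/T), start is the first determined base of the median-length sequence and
# end marks its trailing run of 'N':
#   medianSeqLen(['NNACGTACGTNN', 'ACGTACGTACGT', 'NNNNACGTNNNN']) -> (8, 2, 10)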
299
300 def getAllPairs(taxalist):
301 uniquetaxapairs = []
302 stl = sorted(list(set(taxalist)))
303 for i in range(len(stl)):
304 for j in range(i+1, len(stl)):
305 uniquetaxapairs.append([stl[i],stl[j]])
306 return uniquetaxapairs
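# Editorial example: getAllPairs(['TaxB', 'TaxA', 'TaxC', 'TaxA'])
#   -> [['TaxA', 'TaxB'], ['TaxA', 'TaxC'], ['TaxB', 'TaxC']]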
307
308
309 ################################################READ IN PARAMETER FILE AND DATA FILE
310
311 def get_args(): #command-line arguments for this script
312 parser = argparse.ArgumentParser(description="run MolD")
313 required = parser.add_argument_group("required arguments")
314 required.add_argument("-i", help="textfile with parameters of the analysis", required=True)
315 return parser.parse_args()
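# Editorial sketch (hedged; values are placeholders) of the parameter file read below: one
# 'key=value' pair per line, lines starting with '#' are ignored, e.g.
#   INPUT_FILE=alignment.fas
#   OUTPUT_FILE=MolD_results.html
#   qTAXA=ALL
#   Taxon_rank=1
#   Gaps_as_chars=no
# Further recognised keys are Cutoff, NumberN, Number_of_iterations, MaxLen1, MaxLen2, Iref, Pdiff,
# NMaxSeq and Scoring; most of these fall back to defaults when absent.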
316
317 def mainprocessing(gapsaschars=None, taxalist=None, taxonrank=None, cutoff=None, numnucl=None, numiter=None, maxlenraw=None, maxlenrefined=None, iref=None, pdiff=None, nmax=None, thresh=None, tmpfname=None, origfname=None):
318 ParDict = {}
319 if not(all([gapsaschars, taxalist, taxonrank, cutoff, numnucl, numiter, maxlenraw, maxlenrefined, iref, pdiff, nmax, thresh, tmpfname])):
320 args = get_args()
321 with open(args.i) as params:
322 for line in params:
323 line = line.strip()
324 if line.startswith('#'):
325 pass
326 else:
327 if len(line.split('=')) == 2 and len(line.split('=')[1]) != 0:
328 ParDict[line.split('=')[0]] = line.split('=')[1].replace(' ', '')#VERYNEW
329 else:
330 ParDict['Gaps_as_chars'] = gapsaschars
331 ParDict['qTAXA'] = taxalist
332 ParDict['Taxon_rank'] = taxonrank
333 ParDict['INPUT_FILE'] = tmpfname
334 ParDict['ORIG_FNAME'] = origfname# I do not understand this ORIG_FNAME, it throws an error with the command line
335 ParDict['Cutoff'] = cutoff
336 ParDict['NumberN'] = numnucl
337 ParDict['Number_of_iterations'] = numiter
338 ParDict['MaxLen1'] = maxlenraw
339 ParDict['MaxLen2'] = maxlenrefined
340 ParDict['Iref'] = iref
341 ParDict['Pdiff'] = pdiff
342 #ParDict['PrSeq'] = prseq
343 ParDict['NMaxSeq'] = nmax
344 ParDict['Scoring'] = thresh
345 ParDict['OUTPUT_FILE'] = "str"
346 print(ParDict)
347 ############################################# #VERYNEW HOW GAPS ARE TREATED
348 #REQUIRES A NEW FIELD IN THE GUI
349 if ParDict['Gaps_as_chars'] == 'yes':
350 gaps2D = True#VERYNEW
351 else:#VERYNEW
352 gaps2D = False#VERYNEW
353
354 ############################################ DATA FILE
355 checkread = open(ParDict['INPUT_FILE'], 'r')
356 firstline = checkread.readline()
357 checkread.close()
358 imported=[]#set up a new list with species and identifiers
359 if '>' in firstline:
360 f = open(ParDict['INPUT_FILE'], 'r')
361 for line in f:#VERYNEW - THE DATA READING FROM ALIGNMENT FILE IS ALL REVISED UNTIL f.close()
362 line=line.rstrip()
363 if line.startswith('>'):
364 data = line[1:].split('|')
365 if len(data) != 2:
366 print('Check number of entries in', data[0])
367 #break
368 data.append('')
369 imported.append(data)
370 else:
371 if gaps2D == True:#
372 DNA = line.upper().replace('-', 'D').replace('R', 'N').replace('Y', 'N').replace('S','N').replace('W','N').replace('M','N').replace('K','N')#VERYNEW
373 else:
374 DNA = line.upper().replace('-', 'N').replace('R', 'N').replace('Y', 'N').replace('S','N').replace('W','N').replace('M','N').replace('K','N')#VERYNEW
375 imported[-1][-1] = imported[-1][-1]+DNA
376 f.close()
377 else:
378 f = open(ParDict['INPUT_FILE'], 'r')
379 for line in f:#VERYNEW - THE DATA READING FROM ALIGNMENT FILE IS ALL REVISED UNTIL f.close()
380 line=line.rstrip()
381 data = line.split('\t')
382 if len(data) != 3:
383 print('Check number of entries in', data[0])
384 ID = data[0]
385 thetax = data[1]
386 if gaps2D == True:#
387 DNA = data[2].upper().replace('-', 'D').replace('R', 'N').replace('Y', 'N').replace('S','N').replace('W','N').replace('M','N').replace('K','N')#VERYNEW
388 else:
389 DNA = data[2].upper().replace('-', 'N').replace('R', 'N').replace('Y', 'N').replace('S','N').replace('W','N').replace('M','N').replace('K','N')#VERYNEW
390 imported.append([ID, thetax, DNA])
391 f.close()
392
393 if 'NumberN' in list(ParDict.keys()):#How many ambiguously called nucleotides are allowed
394 NumberN = int(ParDict['NumberN'])
395 else:
396 NumberN = 5
397
398 if len(set([len(i[2]) for i in imported])) != 1:
399 print('Alignment contains sequences of different lengths:', set([len(i[2]) for i in imported]))
400 else:
401 mlen, sstart, send = medianSeqLen([i[2] for i in imported])#OCT2022 - start
402 if mlen + NumberN < len(imported[0][2]):
403 Slice = True
404 FragmentLen = mlen
405 corr = sstart
406 else:
407 Slice = False
408 FragmentLen = len(imported[0][2])
409 sstart = 0
410 send = FragmentLen+1
411 corr = 0
412 raw_records=[]
413 for i in imported:
414 if ParDict['Iref'] != 'NO' and ParDict['Iref'].split(',')[0] == i[0] and ParDict['Iref'].split(',')[1] in ['ex', 'excl', 'out']:
415 continue
416 else:
417 if i[2][sstart:send].count('N') < NumberN and len(i[2][sstart:send]) == FragmentLen:
418 newi = [i[0], i[1], i[2][sstart:send]]
419 raw_records.append(newi)#OCT2022 - end
420 print('\n########################## PARAMETERS ######################\n')#VERYNEW
421 #print('input file:', ParDict['ORIG_FNAME']) #Outcommented ORIG_FNAME
422 print('input file:', ParDict['INPUT_FILE']) #Replacement of the line above
423 print('Coding gaps as characters:', gaps2D)
424 print('Maximum undetermined nucleotides allowed:', NumberN)
425 print('Length of the alignment:', len(imported[0][2]),'->', FragmentLen)
426 print('Indexing reference:', ParDict['Iref'].replace('NO', 'Not set').replace('in', 'included').replace('ex', 'excluded'))
427 print('Read in', len(raw_records), 'sequences')
428
429 PosArrays, VarPosList = PositionArrays([i[2] for i in raw_records])#VERYNEW
430
431 #############################################READ IN OTHER ANALYSIS PARAMETERS
432 ##OCT2022 - start
433 withplus = []
434 P2 = []
435 shift = True
436 if ParDict['qTAXA'][0] == '>':#THIS OPTION DIAGNOSES ALL TAXA WITH AT LEAST THE USER-DEFINED NUMBER OF SEQUENCES AVAILABLE
437 NumSeq = int(ParDict['qTAXA'][1:])
438 Taxarecords = [i[1] for i in raw_records]
439 qCLADEs = []
440 for j in set(Taxarecords):
441 if Taxarecords.count(j) >= NumSeq:
442 qCLADEs.append(j)
443 elif ParDict['qTAXA'].startswith('P:'):#THIS OPTION DIAGNOSES ALL TAXA CONTAINING A USER-DEFINED PATTERN IN THE NAME
444 pattern = ParDict['qTAXA'].split(':')[1]
445 Taxarecords = [i[1] for i in raw_records]
446 qCLADEs = []
447 for j in set(Taxarecords):
448 if pattern in j:
449 qCLADEs.append(j)
450 elif ParDict['qTAXA'].startswith('P+:'):#THIS OPTION POOLS ALL TAXA CONTAINING A USER-DEFINED PATTERN IN THE NAME IN ONE TAXON AND DIAGNOSES IT
451 pattern = ParDict['qTAXA'].split(':')[1]
452 Taxarecords = set([i[1] for i in raw_records if pattern in i[1]])
453 spp = '+'.join(sorted(list(Taxarecords)))
454 qCLADEs = [spp]
455 nrecords = []
456 for rec in raw_records:
457 if rec[1] in Taxarecords:
458 nrecords.append([rec[0], spp, rec[2]])
459 else:
460 nrecords.append(rec)
461 raw_records = nrecords
462 else:#THIS OPTION DIAGNOSES ALL TAXA FROM A USER-DEFINED LIST; TAXA MAY BE COMBINED BY USING '+'
463 qCLADEs = []
464 allrecs = ParDict['qTAXA'].split(',')
465 for item in allrecs:
466 if item in ['ALL', 'All', 'all']:#THIS OPTION DIAGNOSES ALL TAXA IN THE DATASET
467 qCLADEs = list(set([i[1] for i in raw_records]))
468 elif item in [i[1] for i in raw_records]:
469 qCLADEs.append(item)
470 elif '+' in item:
471 withplus.append(item)
472 elif 'VS' in item:
473 P2.append(item.split('VS'))
474 else:
475 print('UNRECOGNIZED TAXON', item)
476 #OCT2022 - end
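# Editorial summary (hedged) of the qTAXA syntax handled above; taxon names are placeholders:
#   qTAXA=>2          -> diagnose every taxon with at least 2 sequences
#   qTAXA=P:Conus     -> diagnose every taxon whose name contains 'Conus'
#   qTAXA=P+:Conus    -> pool all taxa whose name contains 'Conus' into one query taxon
#   qTAXA=ALL         -> diagnose every taxon in the dataset
#   qTAXA=TaxA,TaxB+TaxC,TaxDVSTaxE -> explicit list; '+' pools taxa, 'VS' requests a pairwise
#                                      comparison handled at the end of the script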
477 print('query taxa:', len(qCLADEs+withplus), '-', str(sorted(qCLADEs)+sorted(withplus)).replace('[','').replace(']','').replace("'", ''))#1.3
478
479 if 'Cutoff' in list(ParDict.keys()):#CUTOFF Number of the informative positions to be considered, default 100
480 Cutoff = ParDict['Cutoff']#VERYNEW
481 else:
482 Cutoff = 100
483 print('Cutoff set as:', Cutoff)
484 if 'Number_of_iterations' in list(ParDict.keys()):#Number iterations of MolD
485 N1 = int(ParDict['Number_of_iterations'])
486 else:
487 N1 = 10000
488 print('Number of iterations of MolD set as:', N1)
489
490 if 'MaxLen1' in list(ParDict.keys()):#Maximum length for the raw mDNCs
491 MaxLen1 = int(ParDict['MaxLen1'])
492 else:
493 MaxLen1 = 12
494 print('Maximum length of raw mDNCs set as:', MaxLen1)
495
496 if 'MaxLen2' in list(ParDict.keys()):#Maximum length for the refined mDNCs
497 MaxLen2 = int(ParDict['MaxLen2'])
498 else:
499 MaxLen2 = 7
500 print('Maximum length of refined mDNCs set as:', MaxLen2)
501
502 if 'Pdiff' in list(ParDict.keys()):#Percent difference
503 Percent_difference = float(ParDict['Pdiff'])
504 else:
505 if int(ParDict['Taxon_rank']) == 1:#read in taxon rank to configure Pdiff parameter of artificial dataset
506 Percent_difference = 1
507 else:
508 Percent_difference = 5
509 print('simulated sequences up to', Percent_difference, 'percent divergent from original ones')
510
511 if 'NMaxSeq' in list(ParDict.keys()):#Maximum number of sequences per taxon to be modified
512 Seq_per_clade_to_screw = int(ParDict['NMaxSeq'])
513 else:
514 Seq_per_clade_to_screw = 10####!changed value
515 print('Maximum number of sequences modified per clade', Seq_per_clade_to_screw)
516
517 if 'Scoring' in list(ParDict.keys()):
518 if ParDict['Scoring'] == 'lousy':
519 threshold = 66####!changed value
520 elif ParDict['Scoring'] == 'moderate':
521 threshold = 75####!changed value
522 elif ParDict['Scoring'] == 'stringent':
523 threshold = 90####!changed value
524 elif ParDict['Scoring'] == 'very_stringent':
525 threshold = 95####!changed value
526 else:
527 threshold = 75####!changed value
528 else:
529 threshold = 75####!changed value
530 #print(ParDict['Scoring'], 'scoring of the rDNCs; threshold in two consecutive runs:', threshold)
531 print('scoring of the rDNCs; threshold in two consecutive runs:', threshold)
532 #OCT2022 - start
533 if corr > 1 and len(ParDict['Iref'].split(',')) == 2:
534 print('\nNOTE: The alignment was trimmed automatically to match the median sequence length. The analysed slice starts at site',str(sstart+1),'and ends at site',str(send+1),'. Site indexing in the DNCs follows the provided reference.')
535 if corr > 1 and ParDict['Iref'] == 'NO':
536 print('\nNOTE: The alignment was trimmed automatically to match the median sequence length. The analysed slice starts at site',str(sstart+1),'and ends at site',str(send+1),'. Site indexing in the rDNCs follows the untrimmed alignment.')
537 #OCT2022 - end
538 thephrase = 'The DNA diagnosis for the taxon'
539 ###################################################IMPLEMENTATION
540 #Setting up a new class just for the convenient output formatting
541 class SortedDisplayDict(dict):#this is only to get a readable formatting of the barcode
542 def __str__(self):
543 return "[" + ", ".join("%r: %r" % (key, self[key]) for key in sorted(self)) + "]"
544
545 class SortedDisplayDictVerbose(dict):#this is only to get a readable formatting of the barcode
546 def __str__(self):
547 return ", ".join("%r %r" % (self[key],'in the site '+str(key)) for key in sorted(self)).replace("'", '').replace("A", "'A'").replace("C", "'C'").replace("G", "'G'").replace("T", "'T'")+'.'
548
549 #Calling functions and outputing results
550 if ParDict['OUTPUT_FILE'] == "str":
551 g = StringIO()
552 else:
553 g = open(ParDict['OUTPUT_FILE'], "w")#Initiating output file
554 #VERYNEW
555 print('<h4>########################## PARAMETERS ######################</h4>', file=g)
556 #print("<p>", 'input file:', ParDict['ORIG_FNAME'], "</p>", file=g) #outcommented ORIG_FNAME
557 print("<p>", 'input file:', ParDict['INPUT_FILE'], "</p>", file=g)
558 print("<p>", 'Coding gaps as characters:', gaps2D, "</p>", file=g)
559 print("<p>", 'Maximum undetermined nucleotides allowed:', NumberN, "</p>", file=g)
560 print("<p>", 'Length of the alignment:', len(imported[0][2]),'->', FragmentLen, "</p>", file=g)
561 print("<p>", 'Indexing reference:', ParDict['Iref'].replace('NO', 'Not set').replace('in', 'included').replace('ex', 'excluded'), "</p>", file=g)#OCT2022
562 print("<p>", 'Read in', len(raw_records), 'sequences', "</p>", file=g)
563 print("<p>", 'query taxa:', len(qCLADEs+withplus), '-', str(sorted(qCLADEs)+sorted(withplus)).replace('[','').replace(']','').replace("'", ''), "</p>", file=g)#1.3
564 print("<p>", 'Cutoff set as:', Cutoff, "</p>", file=g)
565 print("<p>", 'Number of iterations of MolD set as:', N1, "</p>", file=g)
566 print("<p>", 'Maximum length of raw mDNCs set as:', MaxLen1, "</p>", file=g)
567 print("<p>", 'Maximum length of refined mDNCs set as:', MaxLen2, "</p>", file=g)
568 print("<p>", 'simulated sequences up to', Percent_difference, 'percent divergent from original ones', "</p>", file=g)
569 print("<p>", 'Maximum number of sequences modified per clade', Seq_per_clade_to_screw, "</p>", file=g)
570 #print("<p>", ParDict['Scoring'], 'scoring of the rDNCs; threshold in two consecutive runs:', threshold, "</p>", file=g)
571 print("<p>", 'scoring of the rDNCs; threshold in two consecutive runs:', threshold, "</p>", file=g)
572 if corr > 1 and len(ParDict['Iref'].split(',')) == 2:
573 print('<h4>NOTE: The alignment was trimmed automatically to match the median sequence length. The analysed slice starts at site',str(sstart+1),'and ends at site',str(send+1),'. Site indexing in the DNCs follows the provided reference.</h4>', file=g)
574 if corr > 1 and ParDict['Iref'] == 'NO':
575 print('<h4>NOTE: The alignment was trimmed automatically to match the median sequence length. The analysed slice starts at site',str(sstart+1),'and ends at site',str(send+1),'. Site indexing in the rDNCs follows the untrimmed alignment.</h4>', file=g)
576
577 print('<h4>########################### RESULTS ##########################</h4>', file=g)
578
579 for qCLADE in sorted(qCLADEs) + sorted(withplus):#OCT2022
580 if '+' in qCLADE:
581 if shift == True:
582 old_records = raw_records
583 shift = False#fixed: assignment (was a no-op comparison), so the original records are saved only once
584 else:
585 raw_records = old_records
586 spp = qCLADE.split('+')
587 nrecords = []
588 for rec in raw_records:
589 if rec[1] in spp:
590 nrecords.append([rec[0], qCLADE, rec[2]])
591 else:
592 nrecords.append(rec)
593 raw_records = nrecords
594 print('\n**************', qCLADE, '**************')
595 print('<h4>**************', qCLADE, '**************</h4>', file=g)
596 Clades, clade_sorted_seqs, shared_positions = Step1(raw_records)#STEP1
597 x,y,z,pures = C_VP_PP(clade_sorted_seqs, qCLADE, shared_positions, Cutoff)#STEP2 ####! added pures
598 newy = {key:y[key] for key in y if not key in pures} ####! newline
599 print('Sequences analyzed:', len(clade_sorted_seqs[qCLADE]))
600 print("<p>",'Sequences analyzed:', len(clade_sorted_seqs[qCLADE]), "</p>", file=g)
601 ND_combinations = [[item] for item in pures] ####! before ND_combinations were initiated as an empty list
602 print('single nucleotide mDNCs:', len(pures), '-', str(SortedDisplayDict({pos+corr : y[pos-1] for pos in [i+1 for i in pures]}))[1:-1])#OCT2022
603 print("<p>",'single nucleotide mDNCs*:',len(pures), '-', str(SortedDisplayDict({pos+corr : y[pos-1] for pos in [i+1 for i in pures]}))[1:-1], "</p>", file=g)#OCT2022
604 N = 1 ####!
605 while N > 0:#STEP3
606 try:
607 q = Diagnostic_combinations(qCLADE, x, newy, N1, MaxLen1, MaxLen2) ####! newy instead of y
608 except IndexError:
609 print(N, 'IndexError')
610 continue
611 for comb in q:
612 if not comb in ND_combinations:
613 ND_combinations.append(comb)
614 N-=1
615 ND_combinations.sort(key=len)
616 #################################### mDNC output
617 try:
618 Nind, KeyPos = IndependentKey(ND_combinations)#STEP4
619 except IndexError:
620 print('no mDNCs recovered for', qCLADE)#VERYNEW
621 print("<p>", 'no mDNCs recovered for', "</p>", qCLADE, file=g)#VERYNEW
622 continue
623 Allpos = []#Create list of all positions involved in mDNCs
624 for comb in ND_combinations:
625 for pos in comb:
626 if not pos in Allpos:
627 Allpos.append(pos)
628 print('\nmDNCs retrieved:', str(len(ND_combinations)) + '; Sites involved:', str(len(Allpos)) + '; Independent mDNCs:', len(Nind))#VERYNEW
629 print("<p>", 'mDNCs* retrieved:', str(len(ND_combinations)) + '; Sites involved:', str(len(Allpos)) + '; Independent mDNCs**:', len(Nind), "</p>", file=g)#VERYNEW
630 print('Shortest retrieved mDNC:', SortedDisplayDict({pos+corr : y[pos-1] for pos in [i+1 for i in ND_combinations[0]]}), '\n')#OCT2022
631 print("<p>",'Shortest retrieved mDNC*:', SortedDisplayDict({pos+corr : y[pos-1] for pos in [i+1 for i in ND_combinations[0]]}), "</p>", file=g)#OCT2022
632 ######################################################## rDNC output
633 Barcode_scores = []#Initiate a list for rDNC scores
634 npos = len(ND_combinations[0])
635 BestBarcode = 'none'####! newline
636 while npos <= min([10, len(Allpos)]):#in this loop the positions are added one-by-one to a rDNC and the rDNC is then rated on the artificially generated datasets
637 Barcode = GenerateBarcode_new(ND_combinations, npos)#Initiate a rDNC
638 Barcode_score = 0#Initiate a score to rate a rDNC
639 N = 100
640 while N > 0:
641 NComplist, NCPP = Screwed_dataset_new(raw_records, Seq_per_clade_to_screw, PosArrays, VarPosList, Percent_difference, qCLADE, Cutoff)#Create an artificial dataset VERYNEW
642 NBarcode = [i for i in Barcode if i in list(NCPP.keys())]
643 if len(Barcode) - len(NBarcode) <= 1 and ConditionD(NBarcode, NComplist, NCPP) == True:####! new condition (first) added
644 Barcode_score +=1
645 N -=1
646 print(npos, 'rDNC_score (100):', [k+corr+1 for k in Barcode], '-', Barcode_score)#VERYNEW
647 print("<p>", npos, 'rDNC_score (100):', [k+corr+1 for k in Barcode], '-', Barcode_score, "</p>", file=g)#OCT2022
648 if Barcode_score >= threshold and len(Barcode_scores) == 1: ###1.3
649 BestBarcode = Barcode###1.3
650 if Barcode_score >= threshold and len(Barcode_scores) > 1 and Barcode_score >= max(Barcode_scores): ###1.3
651 BestBarcode = Barcode####!newline
652 Barcode_scores.append(Barcode_score)
653 if len(Barcode_scores) >= 2 and Barcode_scores[-1] >= threshold and Barcode_scores[-2] >= threshold:#Check whether the rDNC reaches the robustness threshold in two consecutive runs
654 print('Final rDNC:', SortedDisplayDict({pos+corr : y[pos-1] for pos in [i+1 for i in BestBarcode]}))#OCT2022
655 print("<p>",'Final rDNC***:', SortedDisplayDict({pos+corr : y[pos-1] for pos in [i+1 for i in BestBarcode]}), "</p>", file=g)#OCT2022
656 print('\n',thephrase, qCLADE, 'is:', SortedDisplayDictVerbose({pos+corr : y[pos-1] for pos in [i+1 for i in BestBarcode]}))#OCT2022
657 print("<p>", thephrase, qCLADE, 'is:', SortedDisplayDictVerbose({pos+corr : y[pos-1] for pos in [i+1 for i in BestBarcode]}), "</p>", file=g)#OCT2022
658 break
659 else:# VERY NEW FROM HERE ONWARDS
660 npos += 1
661 if npos > min([10, len(Allpos)]):
662 if BestBarcode != 'none':
663 print('The highest scoring rDNC for taxon', qCLADE, 'is:', SortedDisplayDictVerbose({pos+corr : y[pos-1] for pos in [i+1 for i in BestBarcode]}))#OCT2022
664 print("<p>", 'The highest scoring rDNC*** for taxon', qCLADE, 'is:', SortedDisplayDictVerbose({pos+corr : y[pos-1] for pos in [i+1 for i in BestBarcode]}), "</p>", file=g)#OCT2022
665 else:
666 print('No sufficiently robust DNA diagnosis for taxon', qCLADE, 'was retrieved')
667 print("<p>", 'No sufficiently robust DNA diagnosis for taxon', qCLADE, 'was retrieved', "</p>", file=g)
668 #OCT2022 - start
669 print("<h4>", '################################# EXPLANATIONS ####################################', "</h4>", file=g)
670 print("<p>", ' * mDNC -(=minimal Diagnostic nucleotide combination) is a combination of nucleotides at specified sites of the alignment,', "</p>", file=g)
671 print("<p>", ' unique for a query taxon. Therefore it is sufficient to differentiate a query taxon from all reference taxa in a dataset.', "</p>", file=g)
672 print("<p>", ' Because it comprises minimal necessary number of nucleotide sites to differentiate a query, any mutation in the mDNC in' "</p>", file=g)
673 print("<p>", ' single specimen of a query taxon will automatically disqualify it as a diagnostic combination.', "</p>", file=g)
674 print("<p>", "</p>", file=g)
675 print("<p>", ' ** two or more mDNCs are INDEPENDENT if they constitute non-overlapping sets of nucleotide sites.', "</p>", file=g)
676 print("<p>", "</p>", file=g)
677 print("<p>", '*** rDNC -(=robust/redundant Diagnostic nucleotide combination) is a combination of nucleotides at specified sites of the alignment,', "</p>", file=g)
678 print("<p>", ' unique for a query taxon and (likewise mDNC) sufficient to differentiate a query taxon from all reference taxa in a dataset.', "</p>", file=g)
679 print("<p>", ' However, rDNC comprises more than a minimal necessary number of diagnostic sites, and therefore is robust to single nucleotide', "</p>", file=g)
680 print("<p>", ' replacements. Even if a mutation arises in one of the rDNC sites, the remaining ones will (with high probability) remain sufficient ', "</p>", file=g)
681 print("<p>", ' to diagnose the query taxon', "</p>", file=g)
682 print("<h4>", ' Final diagnosis corresponds to rDNC', "</h4>", file=g)
683 #OCT2022 - end
684
685 if ParDict['OUTPUT_FILE'] == "str":
686 contents = g.getvalue()
687 os.unlink(ParDict['INPUT_FILE'])
688 else:
689 contents = None
690 g.close()
691 #OCT2022 - start
692 if len(P2) != 0:
693 ext = '.'+ParDict['OUTPUT_FILE'].split('.')[-1]
694 h = open(ParDict['OUTPUT_FILE'].replace(ext, '_pairwise'+ext), "w")#Initiating output file
695 if len(withplus) != 0:
696 raw_records = old_records
697 taxain = [i[1] for i in raw_records]
698 tpairs = []
699 for alist in P2:
700 if alist.count('all')+alist.count('All')+alist.count('ALL') == 2:
701 tpairs = getAllPairs(taxain)
702 elif alist.count('all')+alist.count('All')+alist.count('ALL') == 1:
703 thetax = [i for i in taxain if i in alist][0]
704 print(thetax)
705 for atax in sorted(list(set(taxain))):
706 if atax != thetax:
707 tpairs.append([thetax, atax])
708 else:
709 for apair in getAllPairs(alist):
710 tpairs.append(apair)
711 for apair in tpairs:
712 t1 = apair[0]
713 t2 = apair[1]
714 p2records = [i for i in raw_records if i[1] in [t1, t2]]
715 print('\n**************', t1, 'VS', t2,'**************')
716 print('<h4>**************', t1, 'VS', t2, '**************</h4>', file=h)
717 C2, css2, sp2 = Step1(p2records)#STEP1
718 x2,y2,z2,pures2 = C_VP_PP(css2, t1, sp2, '>0')#STEP2 ####! added pures
719 Pairphrase = 'Each of the following '+ str(len(pures2))+' sites is invariant across sequences of '+ t1+ ' and differentiates it from '+ t2+': '
720 counterPures = {}
721 for site in pures2:
722 counterPures[site] = "'or'".join(list(set([thing[site] for thing in css2[t2] if thing[site] != 'N'])))
723 Pairphrase = Pairphrase + str(site+corr)+" ('"+str(y2[site])+"' vs '"+str(counterPures[site])+"'), "
724 print(Pairphrase[:-2])
725 print("<p>",Pairphrase[:-2],'</h4>', file=h)#OCT2022
726 x2r,y2r,z2r,pures2r = C_VP_PP(css2, t2, sp2, '>0')#STEP2 ####! added pures
727 Pairphraser = 'Each of the following '+ str(len(pures2r))+' sites is invariant across sequences of '+ t2+ ' and differentiates it from '+ t1+': '
728 counterPuresr = {}
729 for site in pures2r:
730 counterPuresr[site] = "'or'".join(list(set([thing[site] for thing in css2[t1] if thing[site] != 'N'])))
731 Pairphraser = Pairphraser + str(site+corr)+" ('"+str(y2r[site])+"' vs '"+str(counterPuresr[site])+"'), "
732 print(Pairphraser[:-2])
733 print("<p>",Pairphraser[:-2],'</h4>', file=h)#OCT2022
734 h.close()
735
736 #OCT2022 - end
737 return contents, qCLADEs
738 if __name__ == "__main__":
739 c, q = mainprocessing()
740