0
|
1 import argparse
|
|
2 import os
|
|
3 import pandas
|
|
4 import pypandoc
|
|
5 import re
|
|
6 import subprocess
|
|
7 import sys
|
|
8
|
|
9 from Bio import SeqIO
|
|
10 from datetime import date
|
|
11 from mdutils.mdutils import MdUtils
|
14
|
12 # FIXME: TableOfContents doesn't work.
|
|
13 # from mdutils.tools import TableOfContents
|
0
|
14
|
|
15 CDC_ADVISORY = 'The analysis and report presented here should be treated as preliminary. Please contact the CDC/BDRD with any results regarding _Bacillus anthracis_.'
|
|
16
|
|
17
|
|
18 class PimaReport:
|
|
19
|
21
|
20 def __init__(self, analysis_name=None, amr_deletions_file=None, amr_matrix_files=None, assembler_version=None,
|
|
21 assembly_fasta_file=None, assembly_name=None, bedtools_version=None, blastn_version=None,
|
|
22 circos_files=None, compute_sequence_length_file=None, contig_coverage_file=None, dbkey=None,
|
26
|
23 dnadiff_snps_file=None, dnadiff_version=None, errors_file=None, feature_bed_files=None,
|
21
|
24 feature_png_files=None, flye_assembly_info_file=None, genome_insertions_file=None, gzipped=None,
|
26
|
25 illumina_forward_read_file=None, illumina_reverse_read_file=None, kraken2_report_file=None,
|
|
26 kraken2_version=None, minimap2_version=None, mutation_regions_bed_file=None,
|
|
27 mutation_regions_tsv_files=None, ont_file=None, pima_css=None, plasmids_file=None, quast_report_file=None,
|
21
|
28 read_type=None, reference_insertions_file=None, samtools_version=None, varscan_version=None):
|
0
|
29 self.ofh = open("process_log.txt", "w")
|
|
30
|
1
|
31 self.ofh.write("amr_deletions_file: %s\n" % str(amr_deletions_file))
|
|
32 self.ofh.write("amr_matrix_files: %s\n" % str(amr_matrix_files))
|
0
|
33 self.ofh.write("analysis_name: %s\n" % str(analysis_name))
|
21
|
34 self.ofh.write("assembler_version: %s\n" % str(assembler_version))
|
0
|
35 self.ofh.write("assembly_fasta_file: %s\n" % str(assembly_fasta_file))
|
|
36 self.ofh.write("assembly_name: %s\n" % str(assembly_name))
|
12
|
37 self.ofh.write("bedtools_version: %s\n" % str(bedtools_version))
|
2
|
38 self.ofh.write("blastn_version: %s\n" % str(blastn_version))
|
13
|
39 self.ofh.write("circos_files: %s\n" % str(circos_files))
|
1
|
40 self.ofh.write("compute_sequence_length_file: %s\n" % str(compute_sequence_length_file))
|
|
41 self.ofh.write("contig_coverage_file: %s\n" % str(contig_coverage_file))
|
|
42 self.ofh.write("dbkey: %s\n" % str(dbkey))
|
|
43 self.ofh.write("dnadiff_snps_file: %s\n" % str(dnadiff_snps_file))
|
2
|
44 self.ofh.write("dnadiff_version: %s\n" % str(dnadiff_version))
|
18
|
45 self.ofh.write("errors_file: %s\n" % str(errors_file))
|
0
|
46 self.ofh.write("feature_bed_files: %s\n" % str(feature_bed_files))
|
|
47 self.ofh.write("feature_png_files: %s\n" % str(feature_png_files))
|
1
|
48 self.ofh.write("flye_assembly_info_file: %s\n" % str(flye_assembly_info_file))
|
0
|
49 self.ofh.write("gzipped: %s\n" % str(gzipped))
|
1
|
50 self.ofh.write("genome_insertions_file: %s\n" % str(genome_insertions_file))
|
26
|
51 self.ofh.write("illumina_forward_read_file: %s\n" % str(illumina_forward_read_file))
|
|
52 self.ofh.write("illumina_reverse_read_file: %s\n" % str(illumina_reverse_read_file))
|
2
|
53 self.ofh.write("kraken2_report_file: %s\n" % str(kraken2_report_file))
|
|
54 self.ofh.write("kraken2_version: %s\n" % str(kraken2_version))
|
12
|
55 self.ofh.write("minimap2_version: %s\n" % str(minimap2_version))
|
0
|
56 self.ofh.write("mutation_regions_bed_file: %s\n" % str(mutation_regions_bed_file))
|
|
57 self.ofh.write("mutation_regions_tsv_files: %s\n" % str(mutation_regions_tsv_files))
|
26
|
58 self.ofh.write("ont_file: %s\n" % str(ont_file))
|
0
|
59 self.ofh.write("pima_css: %s\n" % str(pima_css))
|
1
|
60 self.ofh.write("plasmids_file: %s\n" % str(plasmids_file))
|
13
|
61 self.ofh.write("quast_report_file: %s\n" % str(quast_report_file))
|
18
|
62 self.ofh.write("read_type: %s\n" % str(read_type))
|
1
|
63 self.ofh.write("reference_insertions_file: %s\n" % str(reference_insertions_file))
|
12
|
64 self.ofh.write("samtools_version: %s\n" % str(samtools_version))
|
|
65 self.ofh.write("varscan_version: %s\n" % str(varscan_version))
|
0
|
66
|
|
67 # General
|
|
68 self.doc = None
|
|
69 self.report_md = 'pima_report.md'
|
|
70
|
|
71 # Inputs
|
1
|
72 self.amr_deletions_file = amr_deletions_file
|
|
73 self.amr_matrix_files = amr_matrix_files
|
13
|
74 self.analysis_name = analysis_name.split('_')[0]
|
|
75 self.ofh.write("self.analysis_name: %s\n" % str(self.analysis_name))
|
21
|
76 if assembler_version is None:
|
|
77 self.assembler_version = 'assembler (version unknown)'
|
|
78 else:
|
|
79 if read_type == 'ont':
|
|
80 # Assembler is flye.
|
|
81 assembler_version = assembler_version.rstrip(' _assembly info_')
|
|
82 else:
|
|
83 # Assembler is spades.
|
|
84 assembler_version = assembler_version.rstrip(' _contigs')
|
|
85 self.assembler_version = re.sub('_', '.', assembler_version)
|
0
|
86 self.assembly_fasta_file = assembly_fasta_file
|
12
|
87 self.assembly_name = re.sub('_', '.', assembly_name.rstrip(' _consensus_'))
|
|
88 if bedtools_version is None:
|
|
89 self.bedtools_version = 'bedtools (version unknown)'
|
|
90 else:
|
|
91 self.bedtools_version = re.sub('_', '.', bedtools_version.rstrip(' _genome insertions'))
|
|
92 if blastn_version is None:
|
|
93 self.blastn_version = 'blastn (version unknown)'
|
|
94 else:
|
|
95 self.blastn_version = re.sub('_', '.', blastn_version.rstrip(' _features_'))
|
13
|
96 self.circos_files = circos_files
|
1
|
97 self.compute_sequence_length_file = compute_sequence_length_file
|
|
98 self.contig_coverage_file = contig_coverage_file
|
|
99 self.dbkey = dbkey
|
|
100 self.dnadiff_snps_file = dnadiff_snps_file
|
12
|
101 if dnadiff_version is None:
|
|
102 self.dnadiff_version = 'dnadiff (version unknown)'
|
|
103 else:
|
|
104 self.dnadiff_version = re.sub('_', '.', dnadiff_version.rstrip(' _snps_'))
|
18
|
105 self.errors_file = errors_file
|
0
|
106 self.feature_bed_files = feature_bed_files
|
|
107 self.feature_png_files = feature_png_files
|
1
|
108 self.flye_assembly_info_file = flye_assembly_info_file
|
0
|
109 self.gzipped = gzipped
|
1
|
110 self.genome_insertions_file = genome_insertions_file
|
26
|
111 self.illumina_forward_read_file = illumina_forward_read_file
|
|
112 self.illumina_reverse_read_file = illumina_reverse_read_file
|
2
|
113 self.kraken2_report_file = kraken2_report_file
|
12
|
114 if kraken2_version is None:
|
|
115 self.kraken2_version = 'kraken2 (version unknown)'
|
|
116 else:
|
|
117 self.kraken2_version = re.sub('_', '.', kraken2_version.rstrip(' _report_'))
|
|
118 if minimap2_version is None:
|
|
119 self.minimap2_version = 'minimap2 (version unknown)'
|
|
120 else:
|
|
121 self.minimap2_version = re.sub('_', '.', minimap2_version)
|
0
|
122 self.mutation_regions_bed_file = mutation_regions_bed_file
|
|
123 self.mutation_regions_tsv_files = mutation_regions_tsv_files
|
|
124 self.pima_css = pima_css
|
1
|
125 self.plasmids_file = plasmids_file
|
13
|
126 self.quast_report_file = quast_report_file
|
18
|
127 self.read_type = read_type.upper()
|
13
|
128 self.reference_insertions_file = reference_insertions_file
|
1
|
129 self.reference_insertions_file = reference_insertions_file
|
12
|
130 if samtools_version is None:
|
|
131 self.samtools_version = 'samtools (version unknown)'
|
|
132 else:
|
|
133 self.samtools_version = re.sub('_', '.', samtools_version)
|
|
134 if varscan_version is None:
|
|
135 self.varscan_version = 'varscan (version unknown)'
|
|
136 else:
|
|
137 self.varscan_version = re.sub('_', '.', varscan_version)
|
0
|
138
|
|
139 # Titles
|
|
140 self.alignment_title = 'Comparison with reference'
|
|
141 self.alignment_notes_title = 'Alignment notes'
|
|
142 self.amr_matrix_title = 'AMR matrix'
|
|
143 self.assembly_methods_title = 'Assembly'
|
|
144 self.assembly_notes_title = 'Assembly notes'
|
|
145 self.basecalling_title = 'Basecalling'
|
|
146 self.basecalling_methods_title = 'Basecalling'
|
|
147 self.contamination_methods_title = 'Contamination check'
|
|
148 self.contig_alignment_title = 'Alignment vs. reference contigs'
|
|
149 self.feature_title = 'Features found in the assembly'
|
|
150 self.feature_methods_title = 'Feature annotation'
|
|
151 self.feature_plot_title = 'Feature annotation plots'
|
|
152 self.large_indel_title = 'Large insertions & deletions'
|
|
153 self.methods_title = 'Methods'
|
18
|
154 self.mutation_errors_title = 'Errors finding mutations in the sample'
|
0
|
155 self.mutation_title = 'Mutations found in the sample'
|
|
156 self.mutation_methods_title = 'Mutation screening'
|
|
157 self.plasmid_methods_title = 'Plasmid annotation'
|
1
|
158 self.plasmid_title = 'Plasmid annotation'
|
21
|
159 self.reference_genome_title = 'Reference genome'
|
0
|
160 self.reference_methods_title = 'Reference comparison'
|
|
161 self.snp_indel_title = 'SNPs and small indels'
|
13
|
162 self.summary_title = 'Summary'
|
0
|
163
|
|
164 # Methods
|
|
165 self.methods = pandas.Series(dtype='float64')
|
|
166 self.methods[self.contamination_methods_title] = pandas.Series(dtype='float64')
|
|
167 self.methods[self.assembly_methods_title] = pandas.Series(dtype='float64')
|
21
|
168 self.methods[self.reference_genome_title] = pandas.Series(dtype='float64')
|
0
|
169 self.methods[self.reference_methods_title] = pandas.Series(dtype='float64')
|
|
170 self.methods[self.mutation_methods_title] = pandas.Series(dtype='float64')
|
|
171 self.methods[self.feature_methods_title] = pandas.Series(dtype='float64')
|
|
172 self.methods[self.plasmid_methods_title] = pandas.Series(dtype='float64')
|
|
173
|
|
174 # Notes
|
|
175 self.assembly_notes = pandas.Series(dtype=object)
|
|
176 self.alignment_notes = pandas.Series(dtype=object)
|
|
177 self.contig_alignment = pandas.Series(dtype=object)
|
|
178
|
|
179 # Values
|
|
180 self.assembly_size = 0
|
|
181 self.contig_info = None
|
|
182 self.feature_hits = pandas.Series(dtype='float64')
|
26
|
183 self.ont_fast5 = None
|
|
184 self.ont_file = ont_file
|
|
185 self.ont_n50 = None
|
|
186 self.ont_read_count = None
|
14
|
187 # TODO: should the following be passed as a parameter?
|
|
188 self.ont_coverage_min = 30
|
|
189 # TODO: should the following be passed as a parameter?
|
1
|
190 self.ont_n50_min = 2500
|
26
|
191
|
21
|
192 if self.read_type == 'ONT':
|
|
193 self.ont_raw_fastq = self.analysis_name
|
26
|
194 self.ont_bases = 0
|
|
195 self.illumina_bases = None
|
21
|
196 self.illumina_fastq = None
|
26
|
197 self.illumina_length_mean = None
|
|
198 self.illumina_read_count = None
|
21
|
199 else:
|
26
|
200 self.illumina_fastq = self.analysis_name
|
|
201 self.illumina_bases = 0
|
|
202 self.illumina_length_mean = 0
|
|
203 self.illumina_read_count = 0
|
|
204 self.ont_bases = None
|
21
|
205 self.ont_raw_fastq = None
|
0
|
206
|
|
207 # Actions
|
|
208 self.did_guppy_ont_fast5 = False
|
|
209 self.did_qcat_ont_fastq = False
|
21
|
210 self.ofh.write("self.read_type: %s\n" % str(self.read_type))
|
|
211 if self.read_type == 'ONT':
|
26
|
212 self.info_ont_fastq(self.ont_file)
|
21
|
213 else:
|
26
|
214 self.info_illumina_fastq([self.illumina_forward_read_file, self.illumina_reverse_read_file])
|
0
|
215 self.load_contig_info()
|
|
216
|
|
217 def run_command(self, command):
|
|
218 self.ofh.write("\nXXXXXX In run_command, command:\n%s\n\n" % str(command))
|
|
219 try:
|
|
220 return re.split('\\n', subprocess.check_output(command, shell=True).decode('utf-8'))
|
|
221 except Exception:
|
|
222 message = 'Command %s failed: exiting...' % command
|
|
223 sys.exit(message)
|
|
224
|
|
225 def format_kmg(self, number, decimals=0):
|
|
226 self.ofh.write("\nXXXXXX In format_kmg, number:\n%s\n" % str(number))
|
|
227 self.ofh.write("XXXXXX In format_kmg, decimals:\n%s\n\n" % str(decimals))
|
|
228 if number == 0:
|
|
229 return '0'
|
|
230 magnitude_powers = [10**9, 10**6, 10**3, 1]
|
|
231 magnitude_units = ['G', 'M', 'K', '']
|
|
232 for i in range(len(magnitude_units)):
|
|
233 if number >= magnitude_powers[i]:
|
|
234 magnitude_power = magnitude_powers[i]
|
|
235 magnitude_unit = magnitude_units[i]
|
|
236 return ('{:0.' + str(decimals) + 'f}').format(number / magnitude_power) + magnitude_unit
|
|
237
|
|
238 def load_contig_info(self):
|
|
239 self.contig_info = pandas.Series(dtype=object)
|
|
240 self.contig_info[self.read_type] = pandas.read_csv(self.contig_coverage_file, header=None, index_col=None, sep='\t').sort_values(1, axis=0, ascending=False)
|
|
241 self.contig_info[self.read_type].columns = ['contig', 'size', 'coverage']
|
26
|
242 mean_coverage = (self.contig_info[self.read_type].iloc[:, 1] * self.contig_info[self.read_type].iloc[:, 2]).sum() / self.contig_info[self.read_type].iloc[:, 1].sum()
|
|
243 if mean_coverage <= self.ont_coverage_min:
|
|
244 warning = '%s mean coverage ({:.0f}X) is less than the recommended minimum ({:.0f}X).'.format(mean_coverage, self.ont_coverage_min) % self.read_type
|
1
|
245 self.assembly_notes = self.assembly_notes.append(pandas.Series(warning))
|
|
246 # Report if some contigs have low coverage.
|
|
247 low_coverage = self.contig_info[self.read_type].loc[self.contig_info[self.read_type]['coverage'] < self.ont_coverage_min, :]
|
|
248 if low_coverage.shape[0] >= 0:
|
|
249 for contig_i in range(low_coverage.shape[0]):
|
|
250 warning = '%s coverage of {:s} ({:.0f}X) is less than the recommended minimum ({:.0f}X).'.format(low_coverage.iloc[contig_i, 0], low_coverage.iloc[contig_i, 2], self.ont_coverage_min) % self.read_type
|
|
251 self.assembly_notes = self.assembly_notes.append(pandas.Series(warning))
|
26
|
252 # See if some contigs have anonymously low coverage.
|
|
253 fold_coverage = self.contig_info[self.read_type]['coverage'] / mean_coverage
|
1
|
254 low_coverage = self.contig_info[self.read_type].loc[fold_coverage < 1 / 5, :]
|
8
|
255 if low_coverage.shape[0] >= 0:
|
1
|
256 for contig_i in range(low_coverage.shape[0]):
|
26
|
257 warning = '%s coverage of {:s} ({:.0f}X) is less than 1/5 the mean coverage ({:.0f}X).'.format(low_coverage.iloc[contig_i, 0], low_coverage.iloc[contig_i, 2], mean_coverage) % self.read_type
|
1
|
258 self.assembly_notes = self.assembly_notes.append(pandas.Series(warning))
|
0
|
259
|
|
260 def load_fasta(self, fasta):
|
|
261 sequence = pandas.Series(dtype=object)
|
|
262 for contig in SeqIO.parse(fasta, 'fasta'):
|
|
263 sequence[contig.id] = contig
|
|
264 return sequence
|
|
265
|
|
266 def load_assembly(self):
|
|
267 self.assembly = self.load_fasta(self.assembly_fasta_file)
|
|
268 self.num_assembly_contigs = len(self.assembly)
|
26
|
269 self.assembly_size = self.format_kmg(sum([len(x) for x in self.assembly]), decimals=1)
|
0
|
270
|
26
|
271 def info_illumina_fastq(self, illumina_read_files):
|
0
|
272 self.ofh.write("\nXXXXXX In info_illumina_fastq\n\n")
|
|
273 if self.gzipped:
|
|
274 opener = 'gunzip -c'
|
|
275 else:
|
|
276 opener = 'cat'
|
26
|
277 for fastq_file in illumina_read_files:
|
|
278 command = ' '.join([opener,
|
|
279 fastq_file,
|
|
280 '| awk \'{getline;s += length($1);getline;getline;}END{print s/(NR/4)"\t"(NR/4)"\t"s}\''])
|
|
281 output = self.run_command(command)
|
|
282 self.ofh.write("output:\n%s\n" % str(output))
|
|
283 self.ofh.write("re.split('\\t', self.run_command(command)[0]:\n%s\n" % str(re.split('\\t', self.run_command(command)[0])))
|
|
284 values = []
|
|
285 for i in re.split('\\t', self.run_command(command)[0]):
|
|
286 if i == '':
|
|
287 values.append(float('nan'))
|
|
288 else:
|
|
289 values.append(float(i))
|
|
290 self.ofh.write("values:\n%s\n" % str(values))
|
|
291 self.ofh.write("values[0]:\n%s\n" % str(values[0]))
|
|
292 self.illumina_length_mean += values[0]
|
|
293 self.ofh.write("values[1]:\n%s\n" % str(values[1]))
|
|
294 self.illumina_read_count += int(values[1])
|
|
295 self.ofh.write("values[2]:\n%s\n" % str(values[2]))
|
|
296 self.illumina_bases += int(values[2])
|
|
297 self.illumina_length_mean /= 2
|
0
|
298 self.illumina_bases = self.format_kmg(self.illumina_bases, decimals=1)
|
|
299
|
|
300 def start_doc(self):
|
13
|
301 header_text = 'Analysis of ' + self.analysis_name
|
|
302 self.doc = MdUtils(file_name=self.report_md, title=header_text)
|
0
|
303
|
14
|
    def add_table_of_contents(self):
        """Insert a table-of-contents marker followed by a page break.

        NOTE(review): the surrounding FIXME comments say the TableOfContents
        marker doesn't work; this method is currently not called (see the
        commented-out call in add_run_information).
        """
        self.doc.create_marker(text_marker="TableOfContents")
        self.doc.new_line()
        # HTML page break for the PDF conversion step.
        self.doc.new_line('<div style="page-break-after: always;"></div>')
        self.doc.new_line()
|
|
309
|
0
|
310 def add_run_information(self):
|
|
311 self.ofh.write("\nXXXXXX In add_run_information\n\n")
|
|
312 self.doc.new_line()
|
|
313 self.doc.new_header(1, 'Run information')
|
|
314 # Tables in md.utils are implemented as a wrapping function.
|
|
315 Table_list = [
|
|
316 "Category",
|
|
317 "Information",
|
|
318 "Date",
|
|
319 date.today(),
|
|
320 "ONT FAST5",
|
14
|
321 self.wordwrap_markdown(self.ont_fast5),
|
0
|
322 "ONT FASTQ",
|
14
|
323 self.wordwrap_markdown(self.ont_raw_fastq),
|
0
|
324 "Illumina FASTQ",
|
21
|
325 self.wordwrap_markdown(self.illumina_fastq),
|
0
|
326 "Assembly",
|
|
327 self.wordwrap_markdown(self.assembly_name),
|
|
328 "Reference",
|
|
329 self.wordwrap_markdown(self.dbkey),
|
|
330 ]
|
|
331 self.doc.new_table(columns=2, rows=7, text=Table_list, text_align='left')
|
|
332 self.doc.new_line()
|
14
|
333 # FIXME: the following doesn't work.
|
|
334 # self.add_table_of_contents()
|
0
|
335 self.doc.new_line()
|
|
336
|
|
337 def add_ont_library_information(self):
|
|
338 self.ofh.write("\nXXXXXX In add_ont_library_information\n\n")
|
|
339 if self.ont_n50 is None:
|
|
340 return
|
|
341 self.doc.new_line()
|
|
342 self.doc.new_header(2, 'ONT library statistics')
|
|
343 Table_List = [
|
|
344 "Category",
|
|
345 "Quantity",
|
|
346 "ONT N50",
|
|
347 '{:,}'.format(self.ont_n50),
|
|
348 "ONT reads",
|
|
349 '{:,}'.format(self.ont_read_count),
|
|
350 "ONT bases",
|
|
351 '{:s}'.format(self.ont_bases),
|
|
352 "Illumina FASTQ",
|
17
|
353 "N/A",
|
0
|
354 "Assembly",
|
|
355 self.wordwrap_markdown(self.assembly_name),
|
|
356 "Reference",
|
|
357 self.wordwrap_markdown(self.dbkey),
|
|
358 ]
|
|
359 self.doc.new_table(columns=2, rows=7, text=Table_List, text_align='left')
|
|
360 self.doc.new_line()
|
|
361
|
|
362 def add_illumina_library_information(self):
|
|
363 self.ofh.write("\nXXXXXX In add_illumina_library_information\n\n")
|
14
|
364 if self.illumina_length_mean is None:
|
0
|
365 return
|
|
366 self.doc.new_line()
|
|
367 self.doc.new_header(2, 'Illumina library statistics')
|
|
368 Table_List = [
|
|
369 "Illumina Info.",
|
|
370 "Quantity",
|
|
371 'Illumina mean length',
|
|
372 '{:.1f}'.format(self.illumina_length_mean),
|
|
373 'Illumina reads',
|
|
374 '{:,}'.format(self.illumina_read_count),
|
|
375 'Illumina bases',
|
|
376 '{:s}'.format(self.illumina_bases)
|
|
377 ]
|
|
378 self.doc.new_table(columns=2, rows=4, text=Table_List, text_align='left')
|
|
379
|
8
|
380 def evaluate_assembly(self):
|
1
|
381 assembly_info = pandas.read_csv(self.compute_sequence_length_file, sep='\t', header=None)
|
|
382 assembly_info.columns = ['contig', 'length']
|
|
383 self.contig_sizes = assembly_info
|
|
384 # Take a look at the number of contigs, their sizes,
|
|
385 # and circularity. Warn if things don't look good.
|
|
386 if assembly_info.shape[0] > 4:
|
|
387 warning = 'Assembly produced {:d} contigs, more than ususally expected; assembly may be fragmented'.format(assembly_info.shape[0])
|
|
388 self.assembly_notes = self.assembly_notes.append(pandas.Series(warning))
|
|
389 small_contigs = assembly_info.loc[assembly_info['length'] <= 3000, :]
|
|
390 if small_contigs.shape[0] > 0:
|
|
391 warning = 'Assembly produced {:d} small contigs ({:s}); assembly may include spurious sequences.'.format(small_contigs.shape[0], ', '.join(small_contigs['contig']))
|
|
392 self.assembly_notes = self.assembly_notes.append(pandas.Series(warning))
|
|
393
|
0
|
394 def add_assembly_information(self):
|
|
395 self.ofh.write("\nXXXXXX In add_assembly_information\n\n")
|
|
396 if self.assembly_fasta_file is None:
|
|
397 return
|
|
398 self.load_assembly()
|
|
399 self.doc.new_line()
|
|
400 self.doc.new_header(2, 'Assembly statistics')
|
|
401 Table_List = [
|
|
402 "Category",
|
|
403 "Information",
|
|
404 "Contigs",
|
|
405 str(self.num_assembly_contigs),
|
|
406 "Assembly size",
|
|
407 str(self.assembly_size),
|
|
408 ]
|
|
409 self.doc.new_table(columns=2, rows=3, text=Table_List, text_align='left')
|
|
410
|
|
411 def info_ont_fastq(self, fastq_file):
|
|
412 self.ofh.write("\nXXXXXX In info_ont_fastq, fastq_file:\n%s\n\n" % str(fastq_file))
|
|
413 opener = 'cat'
|
|
414 if self.gzipped:
|
|
415 opener = 'gunzip -c'
|
|
416 else:
|
|
417 opener = 'cat'
|
|
418 command = ' '.join([opener,
|
|
419 fastq_file,
|
|
420 '| awk \'{getline;print length($0);s += length($1);getline;getline;}END{print "+"s}\'',
|
|
421 '| sort -gr',
|
|
422 '| awk \'BEGIN{bp = 0;f = 0}',
|
|
423 '{if(NR == 1){sub(/+/, "", $1);s=$1}else{bp += $1;if(bp > s / 2 && f == 0){n50 = $1;f = 1}}}',
|
|
424 'END{printf "%d\\t%d\\t%d\\n", n50, (NR - 1), s;exit}\''])
|
|
425 result = list(re.split('\\t', self.run_command(command)[0]))
|
|
426 if result[1] == '0':
|
21
|
427 warning = 'No ONT reads found'
|
|
428 self.assembly_notes = self.assembly_notes.append(pandas.Series(warning))
|
14
|
429 self.ont_n50, self.ont_read_count, ont_raw_bases = [int(i) for i in result]
|
0
|
430 command = ' '.join([opener,
|
|
431 fastq_file,
|
|
432 '| awk \'{getline;print length($0);getline;getline;}\''])
|
|
433 result = self.run_command(command)
|
|
434 result = list(filter(lambda x: x != '', result))
|
14
|
435 self.ont_bases = self.format_kmg(ont_raw_bases, decimals=1)
|
|
436 if self.ont_n50 <= self.ont_n50_min:
|
|
437 warning = 'ONT N50 (%s) is less than the recommended minimum (%s)' % (str(self.ont_n50), str(self.ont_n50_min))
|
1
|
438 self.assembly_notes = self.assembly_notes.append(pandas.Series(warning))
|
0
|
439
|
|
440 def wordwrap_markdown(self, string):
|
|
441 if string:
|
|
442 if len(string) < 35:
|
|
443 return string
|
|
444 else:
|
|
445 if '/' in string:
|
|
446 adjust = string.split('/')
|
|
447 out = ''
|
|
448 max = 35
|
|
449 for i in adjust:
|
|
450 out = out + '/' + i
|
|
451 if len(out) > max:
|
|
452 out += '<br>'
|
|
453 max += 35
|
|
454 return out
|
|
455 else:
|
|
456 out = [string[i:i + 35] for i in range(0, len(string), 50)]
|
|
457 return '<br>'.join(out)
|
|
458 else:
|
|
459 return string
|
|
460
|
|
461 def add_contig_info(self):
|
|
462 self.ofh.write("\nXXXXXX In add_contig_info\n\n")
|
26
|
463 if self.contig_info is None or self.read_type not in self.contig_info.index:
|
0
|
464 return
|
26
|
465 self.doc.new_line()
|
|
466 self.doc.new_header(2, 'Assembly coverage by ' + self.read_type)
|
|
467 Table_List = ["Contig", "Length (bp)", "Coverage (X)"]
|
|
468 formatted = self.contig_info[self.read_type].copy()
|
|
469 formatted.iloc[:, 1] = formatted.iloc[:, 1].apply(lambda x: '{:,}'.format(x))
|
|
470 for i in range(self.contig_info[self.read_type].shape[0]):
|
|
471 Table_List = Table_List + formatted.iloc[i, :].values.tolist()
|
|
472 row_count = int(len(Table_List) / 3)
|
|
473 self.doc.new_table(columns=3, rows=row_count, text=Table_List, text_align='left')
|
0
|
474
|
|
475 def add_assembly_notes(self):
|
|
476 self.ofh.write("\nXXXXXX In add_assembly_notes\n\n")
|
|
477 if len(self.assembly_notes) == 0:
|
|
478 return
|
|
479 self.doc.new_line()
|
|
480 self.doc.new_line('<div style="page-break-after: always;"></div>')
|
|
481 self.doc.new_line()
|
|
482 self.doc.new_header(2, self.assembly_notes_title)
|
1
|
483 for note in self.assembly_notes:
|
|
484 self.doc.new_line(note)
|
0
|
485
|
|
486 def add_contamination(self):
|
|
487 self.ofh.write("\nXXXXXX In add_contamination\n\n")
|
2
|
488 if self.kraken2_report_file is None:
|
0
|
489 return
|
2
|
490 # Read in the Kraken fractions and pull out the useful parts
|
8
|
491 kraken_fracs = pandas.read_csv(self.kraken2_report_file, delimiter='\t', header=None)
|
|
492 kraken_fracs.index = kraken_fracs.iloc[:, 4].values
|
|
493 kraken_fracs = kraken_fracs.loc[kraken_fracs.iloc[:, 3].str.match('[UG]1?'), :]
|
|
494 kraken_fracs = kraken_fracs.loc[(kraken_fracs.iloc[:, 0] >= 1) | (kraken_fracs.iloc[:, 3] == 'U'), :]
|
|
495 kraken_fracs = kraken_fracs.iloc[:, [0, 1, 3, 5]]
|
|
496 kraken_fracs.columns = ['Fraction', 'Reads', 'Level', 'Taxa']
|
|
497 kraken_fracs['Fraction'] = (kraken_fracs['Fraction'] / 100).round(4)
|
|
498 kraken_fracs.sort_values(by='Fraction', inplace=True, ascending=False)
|
|
499 kraken_fracs['Taxa'] = kraken_fracs['Taxa'].str.lstrip()
|
0
|
500 self.doc.new_line()
|
|
501 self.doc.new_header(2, 'Contamination check')
|
10
|
502 self.doc.new_line(self.read_type + ' classifications')
|
|
503 self.doc.new_line()
|
|
504 Table_List = ["Percent of Reads", "Reads", "Level", "Label"]
|
|
505 for index, row in kraken_fracs.iterrows():
|
|
506 Table_List = Table_List + row.tolist()
|
|
507 row_count = int(len(Table_List) / 4)
|
|
508 self.doc.new_table(columns=4, rows=row_count, text=Table_List, text_align='left')
|
|
509 if self.contamination_methods_title not in self.methods:
|
|
510 self.methods[self.contamination_methods_title] = ''
|
11
|
511 method = '%s was used to assign the raw reads into taxa.' % self.kraken2_version.rstrip('report')
|
0
|
512 self.methods[self.contamination_methods_title] = self.methods[self.contamination_methods_title].append(pandas.Series(method))
|
|
513
|
|
    def add_alignment(self):
        """Add the reference-comparison section: SNP/indel counts derived
        from the QUAST report, any alignment notes, one circos plot per
        contig, and the methods sentences for the reference sections.
        """
        self.ofh.write("\nXXXXXX In add_alignment\n\n")
        if self.quast_report_file is not None:
            # Process quast values.
            quast_report = pandas.read_csv(self.quast_report_file, header=0, index_col=0, sep='\t')
            # QUAST reports rates per 100 kbp; scale by total assembly length
            # to recover absolute counts.
            quast_mismatches = int(float(quast_report.loc['# mismatches per 100 kbp', :][0]) * (float(quast_report.loc['Total length (>= 0 bp)', :][0]) / 100000.))
            quast_indels = int(float(quast_report.loc['# indels per 100 kbp', :][0]) * (float(quast_report.loc['Total length (>= 0 bp)', :][0]) / 100000.))
            self.doc.new_line()
            self.doc.new_header(level=2, title=self.alignment_title)
            self.doc.new_line()
            self.doc.new_header(level=3, title=self.snp_indel_title)
            Table_1 = [
                "Category",
                "Quantity",
                'SNPs',
                '{:,}'.format(quast_mismatches),
                'Small indels',
                '{:,}'.format(quast_indels)
            ]
            self.doc.new_table(columns=2, rows=3, text=Table_1, text_align='left')
            self.doc.new_line('<div style="page-break-after: always;"></div>')
            self.doc.new_line()
        # TODO: self.alignment_notes is not currently populated.
        if len(self.alignment_notes) > 0:
            self.doc.new_header(level=3, title=self.alignment_notes_title)
            for note in self.alignment_notes:
                self.doc.new_line(note)
        if len(self.circos_files) > 0:
            # Add circos PNG files.
            for circos_file in self.circos_files:
                contig = os.path.basename(circos_file)
                contig_title = 'Alignment to %s' % contig
                self.doc.new_line()
                self.doc.new_header(level=3, title=contig_title)
                self.doc.new_line('Blue color indicates query sequences aligned to the reference sequence, which is shown in yellow')
                # NOTE(review): the image alt text is the literal string
                # 'contig_title', not the contig_title variable -- confirm
                # whether that is intended.
                self.doc.new_line(self.doc.new_inline_image(text='contig_title', path=os.path.abspath(circos_file)))
                self.doc.new_line('<div style="page-break-after: always;"></div>')
                self.doc.new_line()
        if self.dbkey == 'ref_genome':
            # Hard-coded description of the built-in B. anthracis reference.
            headers = ["* Chromosome - NC_007530.2 Bacillus anthracis str. 'Ames Ancestor', complete sequence",
                       "* pXO1 - NC_007322.2 Bacillus anthracis str. 'Ames Ancestor' plasmid pXO1, complete sequence",
                       "* pXO2 - NC_007323.3 Bacillus anthracis str. 'Ames Ancestor' plasmid pXO2, complete sequence"]
            method = '\n'.join(headers)
            self.methods[self.reference_genome_title] = self.methods[self.reference_genome_title].append(pandas.Series(method))
        method = 'The genome assembly was aligned against the reference sequence using %s.' % self.dnadiff_version
        self.methods[self.reference_methods_title] = self.methods[self.reference_methods_title].append(pandas.Series(method))
|
|
560
|
|
    def add_features(self):
        """Add the features-found section: load each feature BED file into
        self.feature_hits, then emit one per-contig table of hits per
        feature set, and record the annotation methods used.
        """
        self.ofh.write("\nXXXXXX In add_features\n\n")
        if len(self.feature_bed_files) == 0:
            return
        # Load only non-empty BED files; keyed by basename.
        for bbf in self.feature_bed_files:
            if os.path.getsize(bbf) > 0:
                best = pandas.read_csv(filepath_or_buffer=bbf, sep='\t', header=None)
                self.feature_hits[os.path.basename(bbf)] = best
        if len(self.feature_hits) == 0:
            return
        self.ofh.write("self.feature_hits: %s\n" % str(self.feature_hits))
        self.doc.new_line()
        self.doc.new_header(level=2, title=self.feature_title)
        for feature_name in self.feature_hits.index.tolist():
            self.ofh.write("feature_name: %s\n" % str(feature_name))
            features = self.feature_hits[feature_name].copy()
            self.ofh.write("features: %s\n" % str(features))
            if features.shape[0] == 0:
                continue
            # Format start/stop coordinates with thousands separators.
            features.iloc[:, 1] = features.iloc[:, 1].apply(lambda x: '{:,}'.format(x))
            features.iloc[:, 2] = features.iloc[:, 2].apply(lambda x: '{:,}'.format(x))
            self.doc.new_line()
            self.doc.new_header(level=3, title=feature_name)
            # NOTE(review): this emptiness check duplicates the one above
            # and can never trigger here.
            if (features.shape[0] == 0):
                continue
            for contig in pandas.unique(features.iloc[:, 0]):
                self.ofh.write("contig: %s\n" % str(contig))
                self.doc.new_line(contig)
                contig_features = features.loc[(features.iloc[:, 0] == contig), :]
                self.ofh.write("contig_features: %s\n" % str(contig_features))
                Table_List = ['Start', 'Stop', 'Feature', 'Identity (%)', 'Strand']
                for i in range(contig_features.shape[0]):
                    self.ofh.write("i: %s\n" % str(i))
                    feature = contig_features.iloc[i, :].copy(deep=True)
                    self.ofh.write("feature: %s\n" % str(feature))
                    # Column 4 is the identity fraction; render to 3 decimals.
                    feature[4] = '{:.3f}'.format(feature[4])
                    self.ofh.write("feature[1:].values.tolist(): %s\n" % str(feature[1:].values.tolist()))
                    # Drop column 0 (the contig) -- it is the table's caption.
                    Table_List = Table_List + feature[1:].values.tolist()
                self.ofh.write("Table_List: %s\n" % str(Table_List))
                row_count = int(len(Table_List) / 5)
                self.ofh.write("row_count: %s\n" % str(row_count))
                self.doc.new_line()
                self.ofh.write("Before new_table, len(Table_List):: %s\n" % str(len(Table_List)))
                self.doc.new_table(columns=5, rows=row_count, text=Table_List, text_align='left')
        blastn_version = 'The genome assembly was queried for features using %s.' % self.blastn_version
        bedtools_version = 'Feature hits were clustered using %s and the highest scoring hit for each cluster was reported.' % self.bedtools_version
        method = '%s %s' % (blastn_version, bedtools_version)
        self.methods[self.feature_methods_title] = self.methods[self.feature_methods_title].append(pandas.Series(method))
|
|
609
|
|
610 def add_feature_plots(self):
|
|
611 self.ofh.write("\nXXXXXX In add_feature_plots\n\n")
|
|
612 if len(self.feature_png_files) == 0:
|
|
613 return
|
|
614 self.doc.new_line()
|
|
615 self.doc.new_header(level=2, title='Feature Plots')
|
|
616 self.doc.new_paragraph('Only contigs with features are shown')
|
|
617 for feature_png_file in self.feature_png_files:
|
|
618 self.doc.new_line(self.doc.new_inline_image(text='Analysis', path=os.path.abspath(feature_png_file)))
|
|
619
|
|
620 def add_mutations(self):
|
|
621 self.ofh.write("\nXXXXXX In add_mutations\n\n")
|
|
622 if len(self.mutation_regions_tsv_files) == 0:
|
|
623 return
|
8
|
624 try:
|
0
|
625 mutation_regions = pandas.read_csv(self.mutation_regions_bed_file, sep='\t', header=0, index_col=False)
|
|
626 except Exception:
|
|
627 # Likely an empty file.
|
|
628 return
|
|
629 amr_mutations = pandas.Series(dtype=object)
|
|
630 for region_i in range(mutation_regions.shape[0]):
|
|
631 region = mutation_regions.iloc[region_i, :]
|
|
632 region_name = str(region['name'])
|
|
633 self.ofh.write("Processing mutations for region %s\n" % region_name)
|
|
634 region_mutations_tsv_name = '%s_mutations.tsv' % region_name
|
|
635 if region_mutations_tsv_name not in self.mutation_regions_tsv_files:
|
|
636 continue
|
|
637 region_mutations_tsv = self.mutation_regions_tsv_files[region_mutations_tsv_name]
|
8
|
638 try:
|
0
|
639 region_mutations = pandas.read_csv(region_mutations_tsv, sep='\t', header=0, index_col=False)
|
|
640 except Exception:
|
|
641 region_mutations = pandas.DataFrame()
|
|
642 if region_mutations.shape[0] == 0:
|
|
643 continue
|
1
|
644 # Figure out what kind of mutations are in this region.
|
0
|
645 region_mutation_types = pandas.Series(['snp'] * region_mutations.shape[0], name='TYPE', index=region_mutations.index)
|
|
646 region_mutation_types[region_mutations['REF'].str.len() != region_mutations['ALT'].str.len()] = 'small-indel'
|
|
647 region_mutation_drugs = pandas.Series(region['drug'] * region_mutations.shape[0], name='DRUG', index=region_mutations.index)
|
|
648 region_notes = pandas.Series(region['note'] * region_mutations.shape[0], name='NOTE', index=region_mutations.index)
|
|
649 region_mutations = pandas.concat([region_mutations, region_mutation_types, region_mutation_drugs, region_notes], axis=1)
|
|
650 region_mutations = region_mutations[['#CHROM', 'POS', 'TYPE', 'REF', 'ALT', 'DRUG', 'NOTE']]
|
|
651 amr_mutations[region['name']] = region_mutations
|
2
|
652 if (amr_mutations.shape[0] > 0):
|
|
653 # Report the mutations.
|
0
|
654 self.doc.new_line()
|
2
|
655 self.doc.new_header(level=2, title=self.mutation_title)
|
|
656 for region_name in amr_mutations.index.tolist():
|
|
657 region_mutations = amr_mutations[region_name].copy()
|
|
658 self.doc.new_line()
|
|
659 self.doc.new_header(level=3, title=region_name)
|
|
660 if (region_mutations.shape[0] == 0):
|
|
661 self.doc.append('None')
|
|
662 continue
|
|
663 region_mutations.iloc[:, 1] = region_mutations.iloc[:, 1].apply(lambda x: '{:,}'.format(x))
|
|
664 Table_List = ['Reference contig', 'Position', 'Reference', 'Alternate', 'Drug', 'Note']
|
|
665 for i in range(region_mutations.shape[0]):
|
|
666 Table_List = Table_List + region_mutations.iloc[i, [0, 1, 3, 4, 5, 6]].values.tolist()
|
|
667 row_count = int(len(Table_List) / 6)
|
|
668 self.doc.new_table(columns=6, rows=row_count, text=Table_List, text_align='left')
|
18
|
669 if os.path.getsize(self.errors_file) > 0:
|
|
670 # Report the errors encountered when attempting
|
|
671 # to find mutations in the sample.
|
|
672 self.doc.new_line()
|
|
673 self.doc.new_header(level=2, title=self.mutation_errors_title)
|
|
674 with open(self.errors_file, 'r') as efh:
|
|
675 for i, line in enumerate(efh):
|
|
676 line = line.strip()
|
|
677 if line:
|
|
678 self.doc.new_line('* %s' % line)
|
12
|
679 method = '%s reads were mapped to the reference sequence using %s.' % (self.read_type, self.minimap2_version)
|
0
|
680 self.methods[self.mutation_methods_title] = self.methods[self.mutation_methods_title].append(pandas.Series(method))
|
13
|
681 method = 'Mutations were identified using %s and %s.' % (self.samtools_version, self.varscan_version)
|
0
|
682 self.methods[self.mutation_methods_title] = self.methods[self.mutation_methods_title].append(pandas.Series(method))
|
|
683
|
|
684 def add_amr_matrix(self):
|
|
685 self.ofh.write("\nXXXXXX In add_amr_matrix\n\n")
|
|
686 # Make sure that we have an AMR matrix to plot
|
1
|
687 if len(self.amr_matrix_files) == 0:
|
|
688 return
|
|
689 self.doc.new_line()
|
|
690 self.doc.new_header(level=2, title=self.amr_matrix_title)
|
|
691 self.doc.new_line('AMR genes and mutations with their corresponding drugs')
|
|
692 for amr_matrix_file in self.amr_matrix_files:
|
|
693 self.doc.new_line(self.doc.new_inline_image(text='AMR genes and mutations with their corresponding drugs',
|
|
694 path=os.path.abspath(amr_matrix_file)))
|
0
|
695
|
|
696 def add_large_indels(self):
|
|
697 self.ofh.write("\nXXXXXX In add_large_indels\n\n")
|
1
|
698 large_indels = pandas.Series(dtype='float64')
|
|
699 # Pull in insertions.
|
|
700 try:
|
|
701 reference_insertions = pandas.read_csv(filepath_or_buffer=self.reference_insertions_file, sep='\t', header=None)
|
|
702 except Exception:
|
|
703 reference_insertions = pandas.DataFrame()
|
|
704 try:
|
|
705 genome_insertions = pandas.read_csv(filepath_or_buffer=self.genome_insertions_file, sep='\t', header=None)
|
|
706 except Exception:
|
|
707 genome_insertions = pandas.DataFrame()
|
|
708 large_indels['Reference insertions'] = reference_insertions
|
|
709 large_indels['Query insertions'] = genome_insertions
|
|
710 # Pull in deletions.
|
|
711 try:
|
|
712 amr_deletions = pandas.read_csv(filepath_or_buffer=self.amr_deletion_file, sep='\t', header=None)
|
|
713 except Exception:
|
|
714 amr_deletions = pandas.DataFrame()
|
|
715 if amr_deletions.shape[0] > 0:
|
|
716 amr_deletions.columns = ['contig', 'start', 'stop', 'name', 'type', 'drug', 'note']
|
|
717 amr_deletions = amr_deletions.loc[amr_deletions['type'].isin(['large-deletion', 'any']), :]
|
|
718 self.doc.new_line()
|
|
719 self.doc.new_header(level=2, title=self.large_indel_title)
|
25
|
720 self.doc.new_line('This section is informative only when your isolates were identified as *Bacillus anthracis* strains')
|
1
|
721 for genome in ['Reference insertions', 'Query insertions']:
|
|
722 genome_indels = large_indels[genome].copy()
|
|
723 self.doc.new_line()
|
|
724 self.doc.new_header(level=3, title=genome)
|
|
725 if (genome_indels.shape[0] == 0):
|
|
726 continue
|
|
727 genome_indels.iloc[:, 1] = genome_indels.iloc[:, 1].apply(lambda x: '{:,}'.format(x))
|
|
728 genome_indels.iloc[:, 2] = genome_indels.iloc[:, 2].apply(lambda x: '{:,}'.format(x))
|
|
729 genome_indels.iloc[:, 3] = genome_indels.iloc[:, 3].apply(lambda x: '{:,}'.format(x))
|
|
730 Table_List = [
|
|
731 'Reference contig', 'Start', 'Stop', 'Size (bp)'
|
|
732 ]
|
|
733 for i in range(genome_indels.shape[0]):
|
|
734 Table_List = Table_List + genome_indels.iloc[i, :].values.tolist()
|
|
735 row_count = int(len(Table_List) / 4)
|
|
736 self.doc.new_table(columns=4, rows=row_count, text=Table_List, text_align='left')
|
12
|
737 method = 'Large insertions or deletions were found as the complement of aligned regions using %s.' % self.bedtools_version
|
1
|
738 self.methods[self.reference_methods_title] = self.methods[self.reference_methods_title].append(pandas.Series(method))
|
|
739 self.doc.new_line()
|
|
740 self.doc.new_line('<div style="page-break-after: always;"></div>')
|
|
741 self.doc.new_line()
|
0
|
742
|
|
743 def add_plasmids(self):
|
8
|
744 try:
|
1
|
745 plasmids = pandas.read_csv(filepath_or_buffer=self.plasmids_file, sep='\t', header=0)
|
|
746 except Exception:
|
0
|
747 return
|
|
748 plasmids = plasmids.copy()
|
|
749 self.doc.new_line()
|
1
|
750 self.doc.new_header(level=2, title=self.plasmid_title)
|
0
|
751 if (plasmids.shape[0] == 0):
|
|
752 self.doc.new_line('None')
|
|
753 return
|
|
754 plasmids.iloc[:, 3] = plasmids.iloc[:, 3].apply(lambda x: '{:,}'.format(x))
|
|
755 plasmids.iloc[:, 4] = plasmids.iloc[:, 4].apply(lambda x: '{:,}'.format(x))
|
|
756 plasmids.iloc[:, 5] = plasmids.iloc[:, 5].apply(lambda x: '{:,}'.format(x))
|
1
|
757 Table_List = ['Genome contig', 'Plasmid hit', 'Plasmid acc.', 'Contig size', 'Aliged', 'Plasmid size']
|
0
|
758 for i in range(plasmids.shape[0]):
|
|
759 Table_List = Table_List + plasmids.iloc[i, 0:6].values.tolist()
|
|
760 row_count = int(len(Table_List) / 6)
|
|
761 self.doc.new_table(columns=6, rows=row_count, text=Table_List, text_align='left')
|
12
|
762 method = 'The plasmid reference database was queried against the genome assembly using %s.' % self.minimap2_version
|
0
|
763 self.methods[self.plasmid_methods_title] = self.methods[self.plasmid_methods_title].append(pandas.Series(method))
|
2
|
764 method = 'The resulting BAM was converted to a PSL using a custom version of sam2psl.'
|
0
|
765 self.methods[self.plasmid_methods_title] = self.methods[self.plasmid_methods_title].append(pandas.Series(method))
|
|
766 method = 'Plasmid-to-genome hits were resolved using the pChunks algorithm.'
|
|
767 self.methods[self.plasmid_methods_title] = self.methods[self.plasmid_methods_title].append(pandas.Series(method))
|
|
768
|
|
769 def add_methods(self):
|
|
770 self.ofh.write("\nXXXXXX In add_methods\n\n")
|
|
771 if len(self.methods) == 0:
|
|
772 return
|
|
773 self.doc.new_line()
|
|
774 self.doc.new_header(level=2, title=self.methods_title)
|
|
775 for methods_section in self.methods.index.tolist():
|
|
776 if self.methods[methods_section] is None or len(self.methods[methods_section]) == 0:
|
|
777 continue
|
|
778 self.doc.new_line()
|
|
779 self.doc.new_header(level=3, title=methods_section)
|
|
780 self.doc.new_paragraph(' '.join(self.methods[methods_section]))
|
24
|
781 self.doc.new_line('<div style="page-break-after: always;"></div>')
|
|
782 self.doc.new_line()
|
0
|
783
|
|
784 def add_summary(self):
|
|
785 self.ofh.write("\nXXXXXX In add_summary\n\n")
|
|
786 # Add summary title
|
|
787 self.doc.new_header(level=1, title=self.summary_title)
|
|
788 # First section of Summary
|
|
789 self.doc.new_header(level=1, title='CDC Advisory')
|
|
790 self.doc.new_paragraph(CDC_ADVISORY)
|
|
791 self.doc.new_line()
|
|
792 self.add_run_information()
|
|
793 self.add_ont_library_information()
|
|
794 methods = []
|
|
795 if self.did_guppy_ont_fast5:
|
|
796 methods += ['ONT reads were basecalled using guppy']
|
|
797 if self.did_qcat_ont_fastq:
|
|
798 methods += ['ONT reads were demultiplexed and trimmed using qcat']
|
|
799 self.methods[self.basecalling_methods_title] = pandas.Series(methods)
|
26
|
800 self.add_illumina_library_information()
|
|
801 self.add_assembly_information()
|
1
|
802 self.add_contig_info()
|
|
803 self.evaluate_assembly()
|
21
|
804 if self.assembler_version is not None:
|
|
805 if self.read_type == 'ONT':
|
|
806 method = 'ONT reads were assembled using %s' % self.assembler_version
|
|
807 self.methods[self.assembly_methods_title] = self.methods[self.assembly_methods_title].append(pandas.Series(method))
|
|
808 # Pull in the assembly summary and look at the coverage.
|
|
809 assembly_info = pandas.read_csv(self.flye_assembly_info_file, header=0, index_col=0, sep='\t')
|
|
810 # Look for non-circular contigs.
|
|
811 open_contigs = assembly_info.loc[assembly_info['circ.'] == 'N', :]
|
|
812 if open_contigs.shape[0] > 0:
|
|
813 open_contig_ids = open_contigs.index.values
|
|
814 warning = 'Flye reported {:d} open contigs ({:s}); assembly may be incomplete.'.format(open_contigs.shape[0], ', '.join(open_contig_ids))
|
|
815 self.assembly_notes = self.assembly_notes.append(pandas.Series(warning))
|
|
816 else:
|
|
817 method = 'Illumina reads were assembled using %s' % self.assembler_version
|
26
|
818 method = 'The genome assembly was polished using ONT reads and medaka.'
|
27
|
819 self.methods[self.assembly_methods_title] = self.methods[self.assembly_methods_title].append(pandas.Series(method))
|
1
|
820 self.add_assembly_notes()
|
0
|
821
|
|
822 def make_tex(self):
|
|
823 self.doc.new_table_of_contents(table_title='detailed run information', depth=2, marker="tableofcontents")
|
|
824 text = self.doc.file_data_text
|
|
825 text = text.replace("##--[", "")
|
|
826 text = text.replace("]--##", "")
|
|
827 self.doc.file_data_text = text
|
|
828 self.doc.create_md_file()
|
|
829
|
|
    def make_report(self):
        """Assemble every report section in order, write the markdown, and
        render it to pima_report.pdf via pypandoc/weasyprint.

        Closes the process log file when finished.
        """
        self.ofh.write("\nXXXXXX In make_report\n\n")
        self.start_doc()
        self.add_summary()
        self.add_contamination()
        self.add_alignment()
        self.add_features()
        self.add_feature_plots()
        self.add_mutations()
        self.add_large_indels()
        self.add_plasmids()
        self.add_amr_matrix()
        # self.add_snps()
        self.add_methods()
        self.make_tex()
        # It took me quite a long time to find out that the value of the -t
        # (implied) argument in the following command must be 'html' instead of
        # the more logical 'pdf'. see the answer from snsn in this thread:
        # https://github.com/jessicategner/pypandoc/issues/186
        self.ofh.write("\nXXXXX In make_report, calling pypandoc.convert_file...\n\n")
        pypandoc.convert_file(self.report_md,
                              'html',
                              extra_args=['--pdf-engine=weasyprint', '-V', '-css=%s' % self.pima_css],
                              outputfile='pima_report.pdf')
        self.ofh.close()
|
|
855
|
|
856
|
|
# Command-line interface.  All options are optional strings unless noted;
# *_version options default to None and are reported as-is in the methods
# sections.  FIX: corrected help-text typos ('Comnpute' -> 'Compute',
# 'BRD' -> 'BED', 'stypesheet' -> 'stylesheet').
parser = argparse.ArgumentParser()

parser.add_argument('--amr_deletions_file', action='store', dest='amr_deletions_file', help='AMR deletions BED file')
parser.add_argument('--amr_matrix_png_dir', action='store', dest='amr_matrix_png_dir', help='Directory of AMR matrix PNG files')
parser.add_argument('--analysis_name', action='store', dest='analysis_name', help='Sample identifier')
parser.add_argument('--assembler_version', action='store', dest='assembler_version', default=None, help='Assembler version string')
parser.add_argument('--assembly_fasta_file', action='store', dest='assembly_fasta_file', help='Assembly fasta file')
parser.add_argument('--assembly_name', action='store', dest='assembly_name', help='Assembly identifier')
parser.add_argument('--bedtools_version', action='store', dest='bedtools_version', default=None, help='Bedtools version string')
parser.add_argument('--blastn_version', action='store', dest='blastn_version', default=None, help='Blastn version string')
parser.add_argument('--circos_png_dir', action='store', dest='circos_png_dir', help='Directory of circos PNG files')
parser.add_argument('--compute_sequence_length_file', action='store', dest='compute_sequence_length_file', help='Compute sequence length tabular file')
parser.add_argument('--contig_coverage_file', action='store', dest='contig_coverage_file', help='Contig coverage TSV file')
parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference genome identifier')
parser.add_argument('--dnadiff_snps_file', action='store', dest='dnadiff_snps_file', help='DNAdiff snps tabular file')
parser.add_argument('--dnadiff_version', action='store', dest='dnadiff_version', default=None, help='DNAdiff version string')
parser.add_argument('--errors_file', action='store', dest='errors_file', default=None, help='AMR mutations errors encountered txt file')
parser.add_argument('--feature_bed_dir', action='store', dest='feature_bed_dir', help='Directory of best feature hits bed files')
parser.add_argument('--feature_png_dir', action='store', dest='feature_png_dir', help='Directory of best feature hits png files')
parser.add_argument('--flye_assembly_info_file', action='store', dest='flye_assembly_info_file', default=None, help='Flye assembly info tabular file')
parser.add_argument('--genome_insertions_file', action='store', dest='genome_insertions_file', help='Genome insertions BED file')
parser.add_argument('--gzipped', action='store_true', dest='gzipped', default=False, help='Sample(s) is/are gzipped')
parser.add_argument('--illumina_forward_read_file', action='store', dest='illumina_forward_read_file', help='Illumina forward read file')
parser.add_argument('--illumina_reverse_read_file', action='store', dest='illumina_reverse_read_file', help='Illumina reverse read file')
parser.add_argument('--kraken2_report_file', action='store', dest='kraken2_report_file', default=None, help='kraken2 report file')
parser.add_argument('--kraken2_version', action='store', dest='kraken2_version', default=None, help='kraken2 version string')
parser.add_argument('--minimap2_version', action='store', dest='minimap2_version', default=None, help='minimap2 version string')
parser.add_argument('--mutation_regions_bed_file', action='store', dest='mutation_regions_bed_file', help='AMR mutation regions BED file')
parser.add_argument('--mutation_regions_dir', action='store', dest='mutation_regions_dir', help='Directory of mutation regions TSV files')
parser.add_argument('--ont_file', action='store', dest='ont_file', help='ONT single read file')
parser.add_argument('--pima_css', action='store', dest='pima_css', help='PIMA css stylesheet')
parser.add_argument('--plasmids_file', action='store', dest='plasmids_file', default=None, help='pChunks plasmids TSV file')
parser.add_argument('--quast_report_file', action='store', dest='quast_report_file', help='Quast report tabular file')
parser.add_argument('--read_type', action='store', dest='read_type', help='Sample read type (ONT or Illumina)')
parser.add_argument('--reference_insertions_file', action='store', dest='reference_insertions_file', help='Reference insertions BED file')
parser.add_argument('--samtools_version', action='store', dest='samtools_version', default=None, help='Samtools version string')
parser.add_argument('--varscan_version', action='store', dest='varscan_version', default=None, help='Varscan version string')

args = parser.parse_args()
|
|
896
|
1
|
def _sorted_abspaths(dir_path):
    # Return the sorted absolute paths of every entry in dir_path.
    return [os.path.abspath(os.path.join(dir_path, name)) for name in sorted(os.listdir(dir_path))]


# Prepare the AMR matrix PNG files.
amr_matrix_files = _sorted_abspaths(args.amr_matrix_png_dir)
# Prepare the circos PNG files.
circos_files = _sorted_abspaths(args.circos_png_dir)
# Prepare the features BED files.
feature_bed_files = _sorted_abspaths(args.feature_bed_dir)
# Prepare the features PNG files.
feature_png_files = _sorted_abspaths(args.feature_png_dir)
# Prepare the mutation regions TSV files.
# FIX: the original joined file names against args.feature_png_dir (a
# copy/paste error), so this list held feature PNG paths instead of the
# mutation regions TSV paths.
mutation_regions_files = _sorted_abspaths(args.mutation_regions_dir)
|
|
922
|
|
# Build and render the report.  Keyword arguments mirror the PimaReport
# constructor signature exactly (same values, same order as the original
# positional call).
markdown_report = PimaReport(analysis_name=args.analysis_name,
                             amr_deletions_file=args.amr_deletions_file,
                             amr_matrix_files=amr_matrix_files,
                             assembler_version=args.assembler_version,
                             assembly_fasta_file=args.assembly_fasta_file,
                             assembly_name=args.assembly_name,
                             bedtools_version=args.bedtools_version,
                             blastn_version=args.blastn_version,
                             circos_files=circos_files,
                             compute_sequence_length_file=args.compute_sequence_length_file,
                             contig_coverage_file=args.contig_coverage_file,
                             dbkey=args.dbkey,
                             dnadiff_snps_file=args.dnadiff_snps_file,
                             dnadiff_version=args.dnadiff_version,
                             errors_file=args.errors_file,
                             feature_bed_files=feature_bed_files,
                             feature_png_files=feature_png_files,
                             flye_assembly_info_file=args.flye_assembly_info_file,
                             genome_insertions_file=args.genome_insertions_file,
                             gzipped=args.gzipped,
                             illumina_forward_read_file=args.illumina_forward_read_file,
                             illumina_reverse_read_file=args.illumina_reverse_read_file,
                             kraken2_report_file=args.kraken2_report_file,
                             kraken2_version=args.kraken2_version,
                             minimap2_version=args.minimap2_version,
                             mutation_regions_bed_file=args.mutation_regions_bed_file,
                             mutation_regions_tsv_files=mutation_regions_files,
                             ont_file=args.ont_file,
                             pima_css=args.pima_css,
                             plasmids_file=args.plasmids_file,
                             quast_report_file=args.quast_report_file,
                             read_type=args.read_type,
                             reference_insertions_file=args.reference_insertions_file,
                             samtools_version=args.samtools_version,
                             varscan_version=args.varscan_version)
markdown_report.make_report()
|