0
|
1 #!/usr/bin/env python
|
|
2
|
|
3 import argparse
|
|
4 import gzip
|
|
5 import os
|
1
|
6 import shutil
|
0
|
7
|
4
|
8 import numpy
|
|
9 import pandas
|
|
10
|
3
|
# Map a FASTQ quality character to its Phred score, as a string.
# chr(33 + q) is the Sanger (offset-33) encoding of quality q for q in 0..57.
# Characters outside that table (lower case letters, brackets, backslash,
# backtick, underscore, space and newline) deliberately collapse to '1' so
# unexpected bytes do not blow up the per-base scoring loop.
QUALITYKEY = {chr(33 + score): str(score) for score in range(58)}
QUALITYKEY.update({other: '1' for other in "_][\\\n`abcdefghijklmnopqrstuvwxyz "})
|
1
|
24
|
|
25
|
|
def fastq_to_df(fastq_file, gzipped):
    """
    Read a fastq file into a single-column data frame, one row per
    physical line of the file.

    :param fastq_file: path to the fastq file.
    :param gzipped: True if the file is gzip-compressed.
    :return: pandas.DataFrame with one column (0) holding the raw lines.
    """
    # Pass the path (not an open handle) so pandas owns and closes the
    # file; the original open()/gzip.open() handles were never closed.
    # sep="^" is a separator that does not occur in the data, so each
    # line lands intact in a single column.
    compression = 'gzip' if gzipped else None
    return pandas.read_csv(fastq_file, header=None, sep="^", compression=compression)
|
0
|
30
|
|
31
|
|
def nice_size(size):
    """
    Return *size* (a byte count) as a human-readable string, e.g.
    "1.5 KB".  Negative sizes keep their sign; anything that cannot be
    converted to a float yields "??? bytes".
    """
    units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
    sign = ''
    try:
        value = float(size)
    except Exception:
        return '??? bytes'
    if value < 0:
        sign = '-'
        value = -value
    for exponent, unit in enumerate(units):
        # Find the first unit whose next step exceeds the value.
        if value < 1024 ** (exponent + 1):
            scaled = value / float(1024 ** exponent)
            if unit == 'bytes':
                # No decimals for plain bytes.
                return "%s%d bytes" % (sign, scaled)
            return "%s%.1f %s" % (sign, scaled, unit)
    return '??? bytes'
|
|
51
|
|
52
|
4
|
def output_statistics(fastq_files, idxstats_files, metrics_files, output_file, gzipped, dbkey):
    """
    Write an Excel spreadsheet that contains one statistics row per sample.

    fastq_files, idxstats_files and metrics_files are parallel lists:
    entry i of each list describes the same sample.  The workbook is
    written to a local file and then moved to output_file.

    :param fastq_files: list of fastq file paths.
    :param idxstats_files: list of samtools idxstats output paths.
    :param metrics_files: list of vSNP add-zero-coverage metrics paths.
    :param output_file: destination path for the Excel workbook.
    :param gzipped: True if the fastq files are gzip-compressed.
    :param dbkey: reference dbkey recorded in the 'Reference' column.
    """
    columns = ['Reference', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30',
               'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total',
               'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count']
    data_frames = []
    for i, fastq_file in enumerate(fastq_files):
        idxstats_file = idxstats_files[i]
        metrics_file = metrics_files[i]
        file_name_base = os.path.basename(fastq_file)
        # Read fastq_file into a data frame, one row per physical line.
        fastq_df = fastq_to_df(fastq_file, gzipped)
        # A fastq record is 4 lines.
        total_reads = int(len(fastq_df.index) / 4)
        current_sample_df = pandas.DataFrame(index=[file_name_base], columns=columns)
        # Reference
        current_sample_df.at[file_name_base, 'Reference'] = dbkey
        # File Size
        current_sample_df.at[file_name_base, 'File Size'] = nice_size(os.path.getsize(fastq_file))
        # Mean Read Length: sample at most 10000 records.  Rows 3, 7, 11, ...
        # are the quality strings of successive fastq records.
        # NOTE(review): an empty fastq gives sampling_size == 0 and a
        # ZeroDivisionError below — confirm inputs are never empty.
        sampling_size = 10000
        if sampling_size > total_reads:
            sampling_size = total_reads
        fastq_df = fastq_df.iloc[3::4].sample(sampling_size)
        dict_mean = {}
        list_length = []
        for index, row in fastq_df.iterrows():
            base_qualities = []
            for base in list(row.array[0]):
                base_qualities.append(int(QUALITYKEY[base]))
            dict_mean[index] = numpy.mean(base_qualities)
            list_length.append(len(row.array[0]))
        current_sample_df.at[file_name_base, 'Mean Read Length'] = "%.1f" % numpy.mean(list_length)
        # Mean Read Quality
        df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])
        current_sample_df.at[file_name_base, 'Mean Read Quality'] = "%.1f" % df_mean['ave'].mean()
        # Reads Passing Q30.
        # NOTE(review): this stores a fraction of the sampled reads, not a
        # percentage (no * 100) — confirm the intended units.
        reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])
        reads_passing_q30 = "{:10.2f}".format(reads_gt_q30 / sampling_size)
        current_sample_df.at[file_name_base, 'Reads Passing Q30'] = reads_passing_q30
        # Total Reads
        current_sample_df.at[file_name_base, 'Total Reads'] = total_reads
        # All Mapped Reads
        all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file)
        current_sample_df.at[file_name_base, 'All Mapped Reads'] = all_mapped_reads
        # Unmapped Reads
        current_sample_df.at[file_name_base, 'Unmapped Reads'] = unmapped_reads
        # Unmapped Reads Percentage of Total.
        # NOTE(review): also stored as a fraction of total reads, not * 100.
        if unmapped_reads > 0:
            unmapped_reads_percentage = "{:10.2f}".format(unmapped_reads / total_reads)
        else:
            unmapped_reads_percentage = 0
        current_sample_df.at[file_name_base, 'Unmapped Reads Percentage of Total'] = unmapped_reads_percentage
        # Reference with Coverage
        ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file)
        current_sample_df.at[file_name_base, 'Reference with Coverage'] = ref_with_coverage
        # Average Depth of Coverage
        current_sample_df.at[file_name_base, 'Average Depth of Coverage'] = avg_depth_of_coverage
        # Good SNP Count
        current_sample_df.at[file_name_base, 'Good SNP Count'] = good_snp_count
        data_frames.append(current_sample_df)
    excel_df = pandas.concat(data_frames)
    excel_file_name = "output.xlsx"
    # ExcelWriter.save() was removed in pandas 2.0; using the writer as a
    # context manager saves and closes the workbook on exit.
    with pandas.ExcelWriter(excel_file_name, engine='xlsxwriter') as writer:
        excel_df.to_excel(writer, sheet_name='Sheet1')
    shutil.move(excel_file_name, output_file)
|
0
|
120
|
|
121
|
1
|
def process_idxstats_file(idxstats_file):
    """
    Extract read counts from a samtools idxstats output file.

    Line 1 (e.g. "NC_002945.4  4349904  213570  4047") supplies the
    mapped-read count from column 3; line 2 (e.g. "*  0  0  82774")
    supplies the unmapped-read count from column 4.  Missing lines
    leave the corresponding count at 0.

    :return: (all_mapped_reads, unmapped_reads) as ints.
    """
    with open(idxstats_file, "r") as handle:
        rows = [entry.split("\t") for entry in handle]
    all_mapped_reads = int(rows[0][2]) if len(rows) > 0 else 0
    unmapped_reads = int(rows[1][3]) if len(rows) > 1 else 0
    return all_mapped_reads, unmapped_reads
|
0
|
135
|
|
136
|
1
|
def process_metrics_file(metrics_file):
    """
    Extract coverage and SNP statistics from a vSNP metrics file.

    The first line is a comment and is skipped.  The second line (e.g.
    "MarkDuplicates ... 10.338671  98.74%") supplies the average depth
    (column 3) and reference coverage (column 4); the third line (e.g.
    "VCFfilter  611") supplies the good SNP count (column 2).  Values
    are returned exactly as read (no stripping); missing lines leave
    the defaults in place.

    :return: (ref_with_coverage, avg_depth_of_coverage, good_snp_count).
    """
    coverage = '0%'
    depth = 0
    snp_count = 0
    with open(metrics_file, "r") as handle:
        for line_num, text in enumerate(handle):
            if line_num == 0:
                # Skip the comment header.
                continue
            fields = text.split("\t")
            if line_num == 1:
                depth = fields[2]
                coverage = fields[3]
            elif line_num == 2:
                snp_count = fields[1]
    return coverage, depth, snp_count
|
0
|
155
|
|
156
|
4
|
# Command-line interface.  The tool accepts either discrete datasets
# (--read1/--read2 plus --samtools_idxstats/--vsnp_azc) or dataset-collection
# directories (--input_reads_dir/--input_idxstats_dir/--input_metrics_dir).
parser = argparse.ArgumentParser()

parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey')
parser.add_argument('--gzipped', action='store_true', dest='gzipped', required=False, default=False, help='Input files are gzipped')
parser.add_argument('--input_idxstats_dir', action='store', dest='input_idxstats_dir', required=False, default=None, help='Samtools idxstats input directory')
parser.add_argument('--input_metrics_dir', action='store', dest='input_metrics_dir', required=False, default=None, help='vSNP add zero coverage metrics input directory')
parser.add_argument('--input_reads_dir', action='store', dest='input_reads_dir', required=False, default=None, help='Samples input directory')
parser.add_argument('--list_paired', action='store_true', dest='list_paired', required=False, default=False, help='Input samples is a list of paired reads')
parser.add_argument('--output', action='store', dest='output', help='Output Excel statistics file')
parser.add_argument('--read1', action='store', dest='read1', help='Required: single read')
parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
parser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', help='Output of samtools_idxstats')
parser.add_argument('--vsnp_azc', action='store', dest='vsnp_azc', help='Output of vsnp_add_zero_coverage')

args = parser.parse_args()
|
0
|
172
|
4
|
fastq_files = []
idxstats_files = []
metrics_files = []
# Accumulate the parallel input lists: one entry per fastq file, with the
# idxstats/metrics entries kept index-aligned to the fastq entries.
if args.read1 is not None:
    # Discrete dataset inputs: read1 (and possibly read2); both reads of
    # a pair share the same idxstats and metrics outputs.
    reads = [args.read1] if args.read2 is None else [args.read1, args.read2]
    for read in reads:
        fastq_files.append(read)
        idxstats_files.append(args.samtools_idxstats)
        metrics_files.append(args.vsnp_azc)
else:
    # Dataset-collection inputs: one file per sample in each directory.
    # With paired reads there are two fastq files per sample but a single
    # idxstats/metrics file, so those entries are duplicated to stay
    # aligned with the fastq list.
    per_sample = 2 if args.list_paired else 1
    for file_name in sorted(os.listdir(args.input_reads_dir)):
        fastq_files.append(os.path.join(args.input_reads_dir, file_name))
    for file_name in sorted(os.listdir(args.input_idxstats_dir)):
        idxstats_files.extend([os.path.join(args.input_idxstats_dir, file_name)] * per_sample)
    for file_name in sorted(os.listdir(args.input_metrics_dir)):
        metrics_files.extend([os.path.join(args.input_metrics_dir, file_name)] * per_sample)
output_statistics(fastq_files, idxstats_files, metrics_files, args.output, args.gzipped, args.dbkey)
|