annotate vsnp_statistics.py @ 23:b34843f09f9f draft
Uploaded
| field | value |
|---|---|
| author | greg |
| date | Fri, 27 Aug 2021 19:56:20 +0000 |
| parents | 377c1a96aae9 |
| children | b908bb18008a |

```python
#!/usr/bin/env python

import argparse
import gzip
import os
from functools import partial

import numpy
import pandas
from Bio import SeqIO


class Statistics:

    def __init__(self, reference, fastq_file, file_size, total_reads, mean_read_length, mean_read_quality, reads_passing_q30):
        self.reference = reference
        self.fastq_file = fastq_file
        self.file_size = file_size
        self.total_reads = total_reads
        self.mean_read_length = mean_read_length
        self.mean_read_quality = mean_read_quality
        self.reads_passing_q30 = reads_passing_q30


def nice_size(size):
    # Returns a readably formatted string with the size.
    words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
    prefix = ''
    try:
        size = float(size)
        if size < 0:
            size = abs(size)
            prefix = '-'
    except Exception:
        return '??? bytes'
    for ind, word in enumerate(words):
        step = 1024 ** (ind + 1)
        if step > size:
            size = size / float(1024 ** ind)
            if word == 'bytes':  # No decimals for bytes
                return "%s%d bytes" % (prefix, size)
            return "%s%.1f %s" % (prefix, size, word)
    return '??? bytes'
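
# Worked examples of nice_size() (input values chosen purely for
# illustration; units are 1024-based):
#   nice_size(1536)   -> '1.5 KB'
#   nice_size(-2048)  -> '-2.0 KB'
#   nice_size('junk') -> '??? bytes'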


def get_statistics(dbkey, fastq_file, gzipped):
    sampling_size = 10000
    # Read fastq_file into a data frame to
    # get the phred quality scores.
    _open = partial(gzip.open, mode='rt') if gzipped else open
    with _open(fastq_file) as fh:
        identifiers = []
        seqs = []
        letter_annotations = []
        for seq_record in SeqIO.parse(fh, "fastq"):
            identifiers.append(seq_record.id)
            seqs.append(seq_record.seq)
            letter_annotations.append(seq_record.letter_annotations["phred_quality"])
    # Convert lists to Pandas series.
    s1 = pandas.Series(identifiers, name='id')
    s2 = pandas.Series(seqs, name='seq')
    s3 = pandas.Series(letter_annotations, name='qual')
    # Gather the Series into a data frame so that each row
    # keeps a read's sequence and quality scores together.
    fastq_df = pandas.DataFrame(dict(id=s1, seq=s2, qual=s3)).set_index(['id'])
    file_size = nice_size(os.path.getsize(fastq_file))
    # SeqIO.parse() yields one record per read, so each
    # data frame row is a complete read.
    total_reads = len(fastq_df.index)
    # Randomly sample at most the specified number of reads.
    if sampling_size > total_reads:
        sampling_size = total_reads
    fastq_df = fastq_df.sample(sampling_size)
    # Mean Read Length
    dict_mean = {}
    list_length = []
    for id, row in fastq_df.iterrows():
        dict_mean[id] = numpy.mean(row['qual'])
        list_length.append(len(row['seq']))
    mean_read_length = '%.1f' % numpy.mean(list_length)
    # Mean Read Quality
    df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])
    mean_read_quality = '%.1f' % df_mean['ave'].mean()
    # Reads Passing Q30
    reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])
    reads_passing_q30 = '{:.2f}'.format(reads_gt_q30 / sampling_size)
    stats = Statistics(dbkey, os.path.basename(fastq_file), file_size, total_reads, mean_read_length,
                       mean_read_quality, reads_passing_q30)
    return stats
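
# A usage sketch (the dbkey and file name below are hypothetical):
#   stats = get_statistics('AF2122', 'sample_R1.fastq.gz', gzipped=True)
#   print(stats.total_reads, stats.mean_read_quality, stats.reads_passing_q30)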


def accrue_statistics(dbkey, read1, read2, gzipped):
    read1_stats = get_statistics(dbkey, read1, gzipped)
    if read2 is None:
        read2_stats = None
    else:
        read2_stats = get_statistics(dbkey, read2, gzipped)
    return read1_stats, read2_stats


def output_statistics(read1_stats, read2_stats, idxstats_file, metrics_file, output_file):
    paired_reads = read2_stats is not None
    if paired_reads:
        columns = ['Read1 FASTQ', 'File Size', 'Reads', 'Mean Read Length', 'Mean Read Quality',
                   'Reads Passing Q30', 'Read2 FASTQ', 'File Size', 'Reads', 'Mean Read Length', 'Mean Read Quality',
                   'Reads Passing Q30', 'Total Reads', 'All Mapped Reads', 'Unmapped Reads',
                   'Unmapped Reads Percentage of Total', 'Reference with Coverage', 'Average Depth of Coverage',
                   'Good SNP Count', 'Reference']
    else:
        columns = ['FASTQ', 'File Size', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30',
                   'Total Reads', 'All Mapped Reads', 'Unmapped Reads', 'Unmapped Reads Percentage of Total',
                   'Reference with Coverage', 'Average Depth of Coverage', 'Good SNP Count', 'Reference']
    with open(output_file, "w") as outfh:
        # Write a tab-separated header line so that
        # MultiQC can properly handle the output.
        outfh.write("%s\n" % "\t".join(columns))
        line_items = []
        # Gather and output the statistics.
        line_items.append(read1_stats.fastq_file)
        line_items.append(read1_stats.file_size)
        if paired_reads:
            line_items.append(read1_stats.total_reads)
        line_items.append(read1_stats.mean_read_length)
        line_items.append(read1_stats.mean_read_quality)
        line_items.append(read1_stats.reads_passing_q30)
        if paired_reads:
            line_items.append(read2_stats.fastq_file)
            line_items.append(read2_stats.file_size)
            line_items.append(read2_stats.total_reads)
            line_items.append(read2_stats.mean_read_length)
            line_items.append(read2_stats.mean_read_quality)
            line_items.append(read2_stats.reads_passing_q30)
        # Total Reads
        if paired_reads:
            total_reads = read1_stats.total_reads + read2_stats.total_reads
        else:
            total_reads = read1_stats.total_reads
        line_items.append(total_reads)
        # All Mapped Reads
        all_mapped_reads, unmapped_reads = process_idxstats_file(idxstats_file)
        line_items.append(all_mapped_reads)
        line_items.append(unmapped_reads)
        # Unmapped Reads Percentage of Total
        if unmapped_reads > 0:
            unmapped_reads_percentage = '{:.2f}'.format(unmapped_reads / total_reads)
        else:
            unmapped_reads_percentage = 0
        line_items.append(unmapped_reads_percentage)
        # Reference with Coverage
        ref_with_coverage, avg_depth_of_coverage, good_snp_count = process_metrics_file(metrics_file)
        line_items.append(ref_with_coverage)
        line_items.append(avg_depth_of_coverage)
        line_items.append(good_snp_count)
        line_items.append(read1_stats.reference)
        outfh.write('%s\n' % '\t'.join(str(x) for x in line_items))
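
# Illustration of the resulting file for a single-end run (two
# tab-separated lines; the values shown are hypothetical):
#   FASTQ  File Size  Mean Read Length  ...  Good SNP Count  Reference
#   sample.fastq.gz  245.7 MB  247.3  ...  611  AF2122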


def process_idxstats_file(idxstats_file):
    all_mapped_reads = 0
    unmapped_reads = 0
    with open(idxstats_file, "r") as fh:
        for i, line in enumerate(fh):
            line = line.rstrip('\r\n')
            items = line.split("\t")
            if i == 0:
                # NC_002945.4 4349904 213570 4047
                all_mapped_reads = int(items[2])
            elif i == 1:
                # * 0 0 82774
                unmapped_reads = int(items[3])
    return all_mapped_reads, unmapped_reads
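
# Note: `samtools idxstats` emits one tab-separated row per reference
# (reference name, sequence length, mapped read-segments, unmapped
# read-segments) followed by a row for unplaced reads ('*'), so the
# parsing above assumes a single-reference genome.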


def process_metrics_file(metrics_file):
    ref_with_coverage = '0%'
    avg_depth_of_coverage = 0
    good_snp_count = 0
    with open(metrics_file, "r") as ifh:
        for i, line in enumerate(ifh):
            if i == 0:
                # Skip the header line.
                continue
            line = line.rstrip('\r\n')
            items = line.split("\t")
            if i == 1:
                # MarkDuplicates 10.338671 98.74%
                ref_with_coverage = items[3]
                avg_depth_of_coverage = items[2]
            elif i == 2:
                # VCFfilter 611
                good_snp_count = items[1]
    return ref_with_coverage, avg_depth_of_coverage, good_snp_count
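
# Note: the parsing above assumes the layout shown in the inline
# examples: the first line after the header is the MarkDuplicates row
# (third field = average depth, fourth field = reference coverage) and
# the next is the VCFfilter row (second field = good SNP count).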


parser = argparse.ArgumentParser()

parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey')
parser.add_argument('--gzipped', action='store_true', dest='gzipped', required=False, default=False, help='Input files are gzipped')
parser.add_argument('--output', action='store', dest='output', help='Output tabular statistics file')
parser.add_argument('--read1', action='store', dest='read1', help='Required: single read')
parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
parser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', help='Output of samtools idxstats')
parser.add_argument('--vsnp_azc_metrics', action='store', dest='vsnp_azc_metrics', help='Output of vsnp_add_zero_coverage')

args = parser.parse_args()

# Accumulate the inputs and output the statistics.
read1_stats, read2_stats = accrue_statistics(args.dbkey, args.read1, args.read2, args.gzipped)
output_statistics(read1_stats, read2_stats, args.samtools_idxstats, args.vsnp_azc_metrics, args.output)
```
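
A hypothetical invocation (the dbkey value and all file names below are
illustrative, not taken from the tool's Galaxy wrapper):

```
python vsnp_statistics.py --dbkey AF2122 --gzipped \
    --read1 sample_R1.fastq.gz --read2 sample_R2.fastq.gz \
    --samtools_idxstats idxstats.tabular \
    --vsnp_azc_metrics metrics.tabular \
    --output statistics.tabular
```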
