vsnp_statistics.py @ 0:c21d338dbdc4 (draft)
Uploaded
| author | greg |
|---|---|
| date | Tue, 21 Apr 2020 10:19:53 -0400 |
| parents | |
| children | 14e29f7d59ca |
#!/usr/bin/env python

import argparse
import gzip
import multiprocessing
import numpy
import os
import pandas
import queue

INPUT_IDXSTATS_DIR = 'input_idxstats'
INPUT_METRICS_DIR = 'input_metrics'
INPUT_READS_DIR = 'input_reads'
OUTPUT_DIR = 'output'
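# Map each FASTQ quality character to its Phred score (Phred+33 encoding,
# '!' = 0 through 'Z' = 57); lowercase letters and stray symbols fall back to 1.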
QUALITYKEY = {'!':'0', '"':'1', '#':'2', '$':'3', '%':'4', '&':'5', "'":'6', '(':'7', ')':'8', '*':'9', '+':'10', ',':'11', '-':'12', '.':'13', '/':'14', '0':'15', '1':'16', '2':'17', '3':'18', '4':'19', '5':'20', '6':'21', '7':'22', '8':'23', '9':'24', ':':'25', ';':'26', '<':'27', '=':'28', '>':'29', '?':'30', '@':'31', 'A':'32', 'B':'33', 'C':'34', 'D':'35', 'E':'36', 'F':'37', 'G':'38', 'H':'39', 'I':'40', 'J':'41', 'K':'42', 'L':'43', 'M':'44', 'N':'45', 'O':'46', 'P':'47', 'Q':'48', 'R':'49', 'S':'50', 'T':'51', 'U':'52', 'V':'53', 'W':'54', 'X':'55', 'Y':'56', 'Z':'57', '_':'1', ']':'1', '[':'1', '\\':'1', '\n':'1', '`':'1', 'a':'1', 'b':'1', 'c':'1', 'd':'1', 'e':'1', 'f':'1', 'g':'1', 'h':'1', 'i':'1', 'j':'1', 'k':'1', 'l':'1', 'm':'1', 'n':'1', 'o':'1', 'p':'1', 'q':'1', 'r':'1', 's':'1', 't':'1', 'u':'1', 'v':'1', 'w':'1', 'x':'1', 'y':'1', 'z':'1', ' ':'1'}
READCOLUMNS = ['Sample', 'Reference', 'Fastq File', 'Size', 'Total Reads', 'Mean Read Length', 'Mean Read Quality', 'Reads Passing Q30']
SEP = "\t"


def get_base_file_name(file_path):
    base_file_name = os.path.basename(file_path)
    if base_file_name.find(".") > 0:
        # Eliminate the extension.
        return os.path.splitext(base_file_name)[0]
    elif base_file_name.find("_") > 0:
        # The dot extension was likely changed to
        # the "_" character.
        items = base_file_name.split("_")
        return "_".join(items[0:-1])
    else:
        return base_file_name


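# For example, nice_size(123456789) returns '117.7 MB'.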
def nice_size(size):
    # Returns a readably formatted string with the size
    words = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
    prefix = ''
    try:
        size = float(size)
        if size < 0:
            size = abs(size)
            prefix = '-'
    except Exception:
        return '??? bytes'
    for ind, word in enumerate(words):
        step = 1024 ** (ind + 1)
        if step > size:
            size = size / float(1024 ** ind)
            if word == 'bytes':  # No decimals for bytes
                return "%s%d bytes" % (prefix, size)
            return "%s%.1f %s" % (prefix, size, word)
    return '??? bytes'


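# Write the per-read-file portion of the statistics as "label<TAB>value" lines
# (file name, size, total reads, mean read length, mean read quality, and the
# fraction of sampled reads with mean quality >= 30); return the total read count.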
def output_read_stats(gzipped, fastq_file, ofh, sampling_number=10000, output_sample=False, dbkey=None, collection=False):
    file_name_base = os.path.basename(fastq_file)
    # Output a 2-column file where column 1 is
    # the labels and column 2 is the values.
    if output_sample:
        # The Sample and Reference columns should be
        # output only once, so this block handles
        # paired reads, where the columns are not
        # output for Read2.
        try:
            # Illumina read file names are something like:
            # 13-1941-6_S4_L001_R1_600000_fastq_gz
            sample = file_name_base.split("_")[0]
        except Exception:
            sample = ""
        # Sample
        ofh.write("Sample%s%s\n" % (SEP, sample))
        ofh.write("Reference%s%s\n" % (SEP, dbkey))
        if collection:
            read = "Read"
        else:
            read = "Read1"
    else:
        read = "Read2"
    # Read
    ofh.write("%s File%s%s\n" % (read, SEP, file_name_base))
    # File Size
    ofh.write("%s File Size%s%s\n" % (read, SEP, nice_size(os.path.getsize(fastq_file))))
    if gzipped.lower() == "true":
        df = pandas.read_csv(gzip.open(fastq_file, "r"), header=None, sep="^")
    else:
        df = pandas.read_csv(open(fastq_file, "r"), header=None, sep="^")
    total_read_count = int(len(df.index) / 4)
    # Readx Total Reads
    ofh.write("%s Total Reads%s%s\n" % (read, SEP, total_read_count))
    # Mean Read Length
    sampling_size = int(sampling_number)
    if sampling_size > total_read_count:
        sampling_size = total_read_count
    df = df.iloc[3::4].sample(sampling_size)
    dict_mean = {}
    list_length = []
    for index, row in df.iterrows():
        base_qualities = []
        for base in list(row.array[0]):
            base_qualities.append(int(QUALITYKEY[base]))
        dict_mean[index] = numpy.mean(base_qualities)
        list_length.append(len(row.array[0]))
    ofh.write("%s Mean Read Length%s%s\n" % (read, SEP, "%.1f" % numpy.mean(list_length)))
    # Mean Read Quality
    df_mean = pandas.DataFrame.from_dict(dict_mean, orient='index', columns=['ave'])
    ofh.write("%s Mean Read Quality%s%s\n" % (read, SEP, "%.1f" % df_mean['ave'].mean()))
    # Reads Passing Q30
    reads_gt_q30 = len(df_mean[df_mean['ave'] >= 30])
    reads_passing_q30 = "{:10.2f}".format(reads_gt_q30 / sampling_size)
    ofh.write("%s reads passing Q30%s%s\n" % (read, SEP, reads_passing_q30))
    return total_read_count


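# Worker target for each process: pull (reads, idxstats, metrics, output) tuples
# from the shared queue until it is empty and write one tabular statistics file
# per sample, combining read stats, samtools idxstats counts, and the
# vsnp_add_zero_coverage metrics.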
def output_statistics(task_queue, read2, collection, gzipped, dbkey, timeout):
    while True:
        try:
            tup = task_queue.get(block=True, timeout=timeout)
        except queue.Empty:
            break
        read_file, idxstats_file, metrics_file, output_file = tup
        total_reads = 0
        with open(output_file, "w") as ofh:
            total_reads += output_read_stats(gzipped, read_file, ofh, output_sample=True, dbkey=dbkey, collection=collection)
            if read2 is not None:
                total_reads += output_read_stats(gzipped, read2, ofh)
            ofh.write("Total Reads%s%d\n" % (SEP, total_reads))
            with open(idxstats_file, "r") as ifh:
                unmapped_reads = 0
                for i, line in enumerate(ifh):
                    items = line.split("\t")
                    if i == 0:
                        # NC_002945.4 4349904 213570 4047
                        ofh.write("All Mapped Reads%s%s\n" % (SEP, items[2]))
                    elif i == 1:
                        # * 0 0 82774
                        unmapped_reads = int(items[3])
                        ofh.write("Unmapped Reads%s%d\n" % (SEP, unmapped_reads))
            percent_str = "Unmapped Reads Percentage of Total"
            if unmapped_reads > 0:
                unmapped_reads_percentage = "{:10.2f}".format(unmapped_reads / total_reads)
                ofh.write("%s%s%s\n" % (percent_str, SEP, unmapped_reads_percentage))
            else:
                ofh.write("%s%s0\n" % (percent_str, SEP))
            with open(metrics_file, "r") as ifh:
                for i, line in enumerate(ifh):
                    if i == 0:
                        # Skip comments.
                        continue
                    items = line.split("\t")
                    if i == 1:
                        # MarkDuplicates 10.338671 98.74%
                        ofh.write("Average Depth of Coverage%s%s\n" % (SEP, items[2]))
                        ofh.write("Reference with Coverage%s%s\n" % (SEP, items[3]))
                    elif i == 2:
                        # VCFfilter 611
                        ofh.write("Good SNP Count%s%s\n" % (SEP, items[1]))
        task_queue.task_done()


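# Pick the number of worker processes: never more than the number of input
# files, capped at half the available CPUs when more processes are requested
# than CPUs exist, and otherwise the user-requested count.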
def set_num_cpus(num_files, processes):
    num_cpus = int(multiprocessing.cpu_count())
    if num_files < num_cpus and num_files < processes:
        return num_files
    if num_cpus < processes:
        half_cpus = int(num_cpus / 2)
        if num_files < half_cpus:
            return num_files
        return half_cpus
    return processes


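# A single-sample (non-collection) invocation might look like the following;
# the file names and dbkey value are illustrative only, not taken from the tool:
#   python vsnp_statistics.py --read1 sample_R1.fastq.gz --read2 sample_R2.fastq.gz \
#       --gzipped true --dbkey NC_002945v4 --samtools_idxstats sample.idxstats \
#       --vsnp_azc sample_metrics.tabular --output sample_statistics.tabular --processes 8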
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument('--read1', action='store', dest='read1', required=False, default=None, help='Required: single read')
    parser.add_argument('--read2', action='store', dest='read2', required=False, default=None, help='Optional: paired read')
    parser.add_argument('--dbkey', action='store', dest='dbkey', help='Reference dbkey')
    parser.add_argument('--gzipped', action='store', dest='gzipped', help='Input files are gzipped')
    parser.add_argument('--samtools_idxstats', action='store', dest='samtools_idxstats', required=False, default=None, help='Output of samtools_idxstats')
    parser.add_argument('--output', action='store', dest='output', required=False, default=None, help='Output statistics file')
    parser.add_argument('--vsnp_azc', action='store', dest='vsnp_azc', required=False, default=None, help='Output of vsnp_add_zero_coverage')
    parser.add_argument('--processes', action='store', dest='processes', type=int, help='User-selected number of processes to use for job splitting')

    args = parser.parse_args()

    reads_files = []
    idxstats_files = []
    metrics_files = []
    output_files = []
    if args.output is not None:
        # The inputs were not dataset collections, so
        # read1, read2 (possibly) and vsnp_azc will also
        # not be None.
        collection = False
        reads_files.append(args.read1)
        idxstats_files.append(args.samtools_idxstats)
        metrics_files.append(args.vsnp_azc)
        output_files.append(args.output)
    else:
        collection = True
        for file_name in sorted(os.listdir(INPUT_READS_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_READS_DIR, file_name))
            reads_files.append(file_path)
            base_file_name = get_base_file_name(file_path)
            output_files.append(os.path.abspath(os.path.join(OUTPUT_DIR, "%s.tabular" % base_file_name)))
        for file_name in sorted(os.listdir(INPUT_IDXSTATS_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_IDXSTATS_DIR, file_name))
            idxstats_files.append(file_path)
        for file_name in sorted(os.listdir(INPUT_METRICS_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_METRICS_DIR, file_name))
            metrics_files.append(file_path)

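    # Distribute one (reads, idxstats, metrics, output) work unit per sample
    # across a pool of worker processes that drain a joinable queue.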
    multiprocessing.set_start_method('spawn')
    queue1 = multiprocessing.JoinableQueue()
    num_files = len(output_files)
    cpus = set_num_cpus(num_files, args.processes)
    # Set a timeout for get()s in the queue.
    timeout = 0.05

    for i, output_file in enumerate(output_files):
        read_file = reads_files[i]
        idxstats_file = idxstats_files[i]
        metrics_file = metrics_files[i]
        queue1.put((read_file, idxstats_file, metrics_file, output_file))

    # Complete the output_statistics task.
    processes = [multiprocessing.Process(target=output_statistics, args=(queue1, args.read2, collection, args.gzipped, args.dbkey, timeout, )) for _ in range(cpus)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    queue1.join()

    if queue1.empty():
        queue1.close()
        queue1.join_thread()
