vsnp_build_tables.py @ 0:12f2b14549f6 (draft), from the Mercurial repository iuc/vsnp_determine_ref_from_data

"planemo upload for repository https://github.com/galaxyproject/tools-iuc/tree/master/tools/vsnp commit 524a39e08f2bea8b8754284df606ff8dd27ed24b"

| author | iuc |
|---|---|
| date | Wed, 02 Dec 2020 09:11:24 +0000 |
| parents | |
| children | b03e88e7bb1d |
Comparison from the null revision (-1:000000000000) to 0:12f2b14549f6: this initial commit adds the complete file, shown below.
```python
#!/usr/bin/env python

import argparse
import multiprocessing
import os
import queue
import re

import pandas
import pandas.io.formats.excel
from Bio import SeqIO

INPUT_JSON_AVG_MQ_DIR = 'input_json_avg_mq_dir'
INPUT_JSON_DIR = 'input_json_dir'
INPUT_NEWICK_DIR = 'input_newick_dir'
# Maximum columns allowed in a LibreOffice
# spreadsheet is 1024.  Excel allows for
# 16,384 columns, but we'll set the lower
# number as the maximum.  Some browsers
# (e.g., Firefox on Linux) are configured
# to use LibreOffice for Excel spreadsheets.
MAXCOLS = 1024
OUTPUT_EXCEL_DIR = 'output_excel_dir'


def annotate_table(table_df, group, annotation_dict):
    for gbk_chrome, pro in list(annotation_dict.items()):
        ref_pos = list(table_df)
        ref_series = pandas.Series(ref_pos)
        ref_df = pandas.DataFrame(ref_series.str.split(':', expand=True).values, columns=['reference', 'position'])
        all_ref = ref_df[ref_df['reference'] == gbk_chrome]
        positions = all_ref.position.to_frame()
        # Create an annotation file.
        annotation_file = "%s_annotations.csv" % group
        with open(annotation_file, "a") as fh:
            for _, row in positions.iterrows():
                pos = row.position
                try:
                    aaa = pro.iloc[pro.index.get_loc(int(pos))][['chrom', 'locus', 'product', 'gene']]
                    try:
                        chrom, name, locus, tag = aaa.values[0]
                        print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
                    except ValueError:
                        # If there is only one annotation for the entire
                        # chromosome (e.g., flu), then indexing with [0] fails.
                        chrom, name, locus, tag = aaa.values
                        print("{}:{}\t{}, {}, {}".format(chrom, pos, locus, tag, name), file=fh)
                except KeyError:
                    print("{}:{}\tNo annotated product".format(gbk_chrome, pos), file=fh)
    # Read the annotation file into a data frame.
    annotations_df = pandas.read_csv(annotation_file, sep='\t', header=None, names=['index', 'annotations'], index_col='index')
    # Remove the annotation_file from disk since both
    # cascade and sort tables are built using the file,
    # and it is opened for writing in append mode.
    os.remove(annotation_file)
    # Process the data.
    table_df_transposed = table_df.T
    table_df_transposed.index = table_df_transposed.index.rename('index')
    table_df_transposed = table_df_transposed.merge(annotations_df, left_index=True, right_index=True)
    table_df = table_df_transposed.T
    return table_df
```
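For orientation, annotate_table expects the SNP table's column labels to be "chromosome:position" strings; the split into a two-column frame is the pivot the whole function turns on. A minimal sketch of just that step, using made-up column labels:

```python
import pandas

# Hypothetical column labels in the "chromosome:position" form the tool uses.
ref_pos = ['NC_002945.4:1024', 'NC_002945.4:2048', 'phiX174:42']
ref_series = pandas.Series(ref_pos)
ref_df = pandas.DataFrame(ref_series.str.split(':', expand=True).values,
                          columns=['reference', 'position'])

# Selecting one chromosome leaves only its positions, as in annotate_table.
print(ref_df[ref_df['reference'] == 'NC_002945.4'])
# -> two rows for NC_002945.4, positions 1024 and 2048
```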
```python
def excel_formatter(json_file_name, excel_file_name, group, annotation_dict):
    pandas.io.formats.excel.header_style = None
    table_df = pandas.read_json(json_file_name, orient='split')
    if annotation_dict is not None:
        table_df = annotate_table(table_df, group, annotation_dict)
    else:
        table_df = table_df.append(pandas.Series(name='no annotations'))
    writer = pandas.ExcelWriter(excel_file_name, engine='xlsxwriter')
    table_df.to_excel(writer, sheet_name='Sheet1')
    writer_book = writer.book
    ws = writer.sheets['Sheet1']
    format_a = writer_book.add_format({'bg_color': '#58FA82'})
    format_g = writer_book.add_format({'bg_color': '#F7FE2E'})
    format_c = writer_book.add_format({'bg_color': '#0000FF'})
    format_t = writer_book.add_format({'bg_color': '#FF0000'})
    format_normal = writer_book.add_format({'bg_color': '#FDFEFE'})
    format_lowqual = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
    format_ambiguous = writer_book.add_format({'font_color': '#C70039', 'bg_color': '#E2CFDD'})
    format_n = writer_book.add_format({'bg_color': '#E2CFDD'})
    rows, cols = table_df.shape
    ws.set_column(0, 0, 30)
    ws.set_column(1, cols, 2.1)
    ws.freeze_panes(2, 1)
    format_annotation = writer_book.add_format({'font_color': '#0A028C', 'rotation': '-90', 'align': 'top'})
    # Format the row immediately below the table.
    ws.set_row(rows + 1, cols + 1, format_annotation)
    # Make sure that row/column locations don't overlap.
    ws.conditional_format(rows - 2, 1, rows - 1, cols, {'type': 'cell', 'criteria': '<', 'value': 55, 'format': format_lowqual})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'cell', 'criteria': '==', 'value': 'B$2', 'format': format_normal})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'A', 'format': format_a})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'G', 'format': format_g})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'C', 'format': format_c})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'T', 'format': format_t})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'S', 'format': format_ambiguous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'Y', 'format': format_ambiguous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'R', 'format': format_ambiguous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'W', 'format': format_ambiguous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'K', 'format': format_ambiguous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'M', 'format': format_ambiguous})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': 'N', 'format': format_n})
    ws.conditional_format(2, 1, rows - 2, cols, {'type': 'text', 'criteria': 'containing', 'value': '-', 'format': format_n})
    format_rotation = writer_book.add_format({})
    format_rotation.set_rotation(90)
    for column_num, column_name in enumerate(list(table_df.columns)):
        ws.write(0, column_num + 1, column_name, format_rotation)
    # Give the annotation row enough height for the rotated annotation text.
    ws.set_row(rows, 400, format_annotation)
    writer.save()
```
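The per-base coloring rests on xlsxwriter's conditional_format with 'text'/'containing' criteria, one rule per nucleotide code. A stripped-down sketch of the same pattern, with a hypothetical output file name and just two of the formats:

```python
import xlsxwriter

workbook = xlsxwriter.Workbook('demo_table.xlsx')  # hypothetical file name
ws = workbook.add_worksheet('Sheet1')
format_a = workbook.add_format({'bg_color': '#58FA82'})
format_t = workbook.add_format({'bg_color': '#FF0000'})

# A tiny block of base calls: two samples by two positions.
for r, row in enumerate([['A', 'T'], ['T', 'A']]):
    for c, call in enumerate(row):
        ws.write(r, c, call)

# One rule per base over the whole block, as excel_formatter does.
ws.conditional_format(0, 0, 1, 1, {'type': 'text', 'criteria': 'containing', 'value': 'A', 'format': format_a})
ws.conditional_format(0, 0, 1, 1, {'type': 'text', 'criteria': 'containing', 'value': 'T', 'format': format_t})
workbook.close()
```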
```python
def get_annotation_dict(gbk_file):
    gbk_dict = SeqIO.to_dict(SeqIO.parse(gbk_file, "genbank"))
    annotation_dict = {}
    tmp_file = "features.csv"
    # Create a file of chromosomes and features.
    for chromosome in list(gbk_dict.keys()):
        with open(tmp_file, 'w+') as fh:
            for feature in gbk_dict[chromosome].features:
                if "CDS" in feature.type or "rRNA" in feature.type:
                    try:
                        product = feature.qualifiers['product'][0]
                    except KeyError:
                        product = None
                    try:
                        locus = feature.qualifiers['locus_tag'][0]
                    except KeyError:
                        locus = None
                    try:
                        gene = feature.qualifiers['gene'][0]
                    except KeyError:
                        gene = None
                    fh.write("%s\t%d\t%d\t%s\t%s\t%s\n" % (chromosome, int(feature.location.start), int(feature.location.end), locus, product, gene))
        # Read the chromosomes and features file into a data frame.
        df = pandas.read_csv(tmp_file, sep='\t', names=["chrom", "start", "stop", "locus", "product", "gene"])
        # Process the data.
        df = df.sort_values(['start', 'gene'], ascending=[True, False])
        df = df.drop_duplicates('start')
        pro = df.reset_index(drop=True)
        pro.index = pandas.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both')
        annotation_dict[chromosome] = pro
    return annotation_dict
```
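Each frame stored in annotation_dict is indexed by an IntervalIndex built from the feature start/stop coordinates; that is what lets annotate_table resolve a SNP position to the feature spanning it with a single get_loc call. A minimal sketch with invented coordinates:

```python
import pandas

# Two hypothetical features on one chromosome.
pro = pandas.DataFrame({
    'chrom': ['chr1', 'chr1'],
    'start': [100, 500],
    'stop': [400, 900],
    'locus': ['LOC_0001', 'LOC_0002'],
    'product': ['hypothetical protein', 'ribosomal protein'],
    'gene': [None, 'rpsA'],
})
pro.index = pandas.IntervalIndex.from_arrays(pro['start'], pro['stop'], closed='both')

# get_loc finds the interval containing a position; a position outside
# every feature raises KeyError, which annotate_table catches.
print(pro.iloc[pro.index.get_loc(650)][['locus', 'product', 'gene']])
# -> LOC_0002, ribosomal protein, rpsA
```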
```python
def get_base_file_name(file_path):
    base_file_name = os.path.basename(file_path)
    if base_file_name.find(".") > 0:
        # Eliminate the extension.
        return os.path.splitext(base_file_name)[0]
    elif base_file_name.find("_") > 0:
        # The dot extension was likely changed to
        # the "_" character.
        items = base_file_name.split("_")
        return "_".join(items[0:-1])
    else:
        return base_file_name
```
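A few illustrative calls (hypothetical paths, assuming the function above is in scope):

```python
# A dotted name loses its extension; an underscore-only name loses its
# final component; anything else passes through unchanged.
print(get_base_file_name('/tmp/Mbovis-01D6.newick'))  # Mbovis-01D6
print(get_base_file_name('/tmp/Mbovis-01D6_newick'))  # Mbovis-01D6
print(get_base_file_name('/tmp/sample'))              # sample
```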
```python
def output_cascade_table(cascade_order, mqdf, group, annotation_dict):
    cascade_order_mq = pandas.concat([cascade_order, mqdf], join='inner')
    output_table(cascade_order_mq, "cascade", group, annotation_dict)


def output_excel(df, type_str, group, annotation_dict, count=None):
    # Output the temporary json file that
    # is used by the excel_formatter.
    if count is None:
        if group is None:
            json_file_name = "%s_order_mq.json" % type_str
            excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table.xlsx" % type_str)
        else:
            json_file_name = "%s_%s_order_mq.json" % (group, type_str)
            excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table.xlsx" % (group, type_str))
    else:
        if group is None:
            json_file_name = "%s_order_mq_%d.json" % (type_str, count)
            excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_table_%d.xlsx" % (type_str, count))
        else:
            json_file_name = "%s_%s_order_mq_%d.json" % (group, type_str, count)
            excel_file_name = os.path.join(OUTPUT_EXCEL_DIR, "%s_%s_table_%d.xlsx" % (group, type_str, count))
    df.to_json(json_file_name, orient='split')
    # Output the Excel file.
    excel_formatter(json_file_name, excel_file_name, group, annotation_dict)


def output_sort_table(cascade_order, mqdf, group, annotation_dict):
    sort_df = cascade_order.T
    sort_df['abs_value'] = sort_df.index
    sort_df[['chrom', 'pos']] = sort_df['abs_value'].str.split(':', expand=True)
    sort_df = sort_df.drop(['abs_value', 'chrom'], axis=1)
    sort_df.pos = sort_df.pos.astype(int)
    sort_df = sort_df.sort_values(by=['pos'])
    sort_df = sort_df.drop(['pos'], axis=1)
    sort_df = sort_df.T
    sort_order_mq = pandas.concat([sort_df, mqdf], join='inner')
    output_table(sort_order_mq, "sort", group, annotation_dict)


def output_table(df, type_str, group, annotation_dict):
    if isinstance(group, str) and group.startswith("dataset"):
        # Inputs are single files, not collections,
        # so input file names are not useful for naming
        # output files.
        group_str = None
    else:
        group_str = group
    count = 0
    chunk_start = 0
    chunk_end = 0
    column_count = df.shape[1]
    if column_count >= MAXCOLS:
        # Here the number of columns is greater than
        # the maximum allowed by Excel, so multiple
        # outputs will be produced.
        while column_count >= MAXCOLS:
            count += 1
            chunk_end += MAXCOLS
            df_of_type = df.iloc[:, chunk_start:chunk_end]
            output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
            chunk_start += MAXCOLS
            column_count -= MAXCOLS
        count += 1
        df_of_type = df.iloc[:, chunk_start:]
        output_excel(df_of_type, type_str, group_str, annotation_dict, count=count)
    else:
        output_excel(df, type_str, group_str, annotation_dict)
```
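The chunking walk in output_table is easiest to follow with concrete numbers. This sketch replays the same arithmetic for a hypothetical 2,500-column table against MAXCOLS = 1024, yielding three output files:

```python
MAXCOLS = 1024
column_count = 2500  # hypothetical table width
count = chunk_start = chunk_end = 0
while column_count >= MAXCOLS:
    count += 1
    chunk_end += MAXCOLS
    print("chunk %d: columns %d:%d" % (count, chunk_start, chunk_end))
    chunk_start += MAXCOLS
    column_count -= MAXCOLS
count += 1
print("chunk %d: columns %d:" % (count, chunk_start))
# chunk 1: columns 0:1024
# chunk 2: columns 1024:2048
# chunk 3: columns 2048:   (the remaining 452 columns)
```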
```python
def preprocess_tables(task_queue, annotation_dict, timeout):
    while True:
        try:
            tup = task_queue.get(block=True, timeout=timeout)
        except queue.Empty:
            break
        newick_file, json_file, json_avg_mq_file = tup
        avg_mq_series = pandas.read_json(json_avg_mq_file, typ='series', orient='split')
        # Convert the average mapping quality series to a one-row dataframe.
        mqdf = avg_mq_series.to_frame(name='MQ')
        mqdf = mqdf.T
        # Get the group.
        group = get_base_file_name(newick_file)
        snps_df = pandas.read_json(json_file, orient='split')
        with open(newick_file, 'r') as fh:
            for line in fh:
                line = re.sub('[:,]', '\n', line)
                line = re.sub('[)(]', '', line)
                line = re.sub(r'[0-9].*\.[0-9].*\n', '', line)
                line = re.sub('root\n', '', line)
                sample_order = line.split('\n')
                sample_order = list([_f for _f in sample_order if _f])
                sample_order.insert(0, 'root')
                tree_order = snps_df.loc[sample_order]
                # Count the number of SNPs in each column.
                snp_per_column = []
                for column_header in tree_order:
                    count = 0
                    column = tree_order[column_header]
                    for element in column:
                        if element != column[0]:
                            count = count + 1
                    snp_per_column.append(count)
                row1 = pandas.Series(snp_per_column, tree_order.columns, name="snp_per_column")
                # Count the number of SNPs from the
                # top of each column in the table.
                snp_from_top = []
                for column_header in tree_order:
                    count = 0
                    column = tree_order[column_header]
                    # For each element in the column,
                    # skip the first element.
                    for element in column[1:]:
                        if element == column[0]:
                            count = count + 1
                        else:
                            break
                    snp_from_top.append(count)
                row2 = pandas.Series(snp_from_top, tree_order.columns, name="snp_from_top")
                tree_order = tree_order.append([row1])
                tree_order = tree_order.append([row2])
                # In pandas=0.18.1 even this does not work:
                # abc = row1.to_frame()
                # abc = abc.T --> tree_order.shape (5, 18), abc.shape (1, 18)
                # tree_order.append(abc)
                # Continue to get error: "*** ValueError: all the input arrays must have same number of dimensions"
                tree_order = tree_order.T
                tree_order = tree_order.sort_values(['snp_from_top', 'snp_per_column'], ascending=[True, False])
                tree_order = tree_order.T
                # Remove the snp_per_column and snp_from_top rows.
                cascade_order = tree_order[:-2]
                # Output the cascade table.
                output_cascade_table(cascade_order, mqdf, group, annotation_dict)
                # Output the sorted table.
                output_sort_table(cascade_order, mqdf, group, annotation_dict)
        task_queue.task_done()
```
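The regex pipeline at the top of the newick loop reduces a one-line tree to a sample order: separators become newlines, parentheses are dropped, and any line carrying a decimal branch length is deleted. A sketch on a small hypothetical tree:

```python
import re

line = "(sampleA:0.05,sampleB:0.10)root;\n"  # hypothetical newick tree
line = re.sub('[:,]', '\n', line)                # separators -> newlines
line = re.sub('[)(]', '', line)                  # drop parentheses
line = re.sub(r'[0-9].*\.[0-9].*\n', '', line)   # drop branch-length lines
line = re.sub('root\n', '', line)                # drop a bare root label, if any
sample_order = [s for s in line.split('\n') if s]
sample_order.insert(0, 'root')
print(sample_order)  # ['root', 'sampleA', 'sampleB']
```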
```python
def set_num_cpus(num_files, processes):
    num_cpus = int(multiprocessing.cpu_count())
    if num_files < num_cpus and num_files < processes:
        return num_files
    if num_cpus < processes:
        half_cpus = int(num_cpus / 2)
        if num_files < half_cpus:
            return num_files
        return half_cpus
    return processes
```
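The CPU-picking rules are compact but easy to misread. This machine-independent restatement takes the CPU count as a parameter (all values hypothetical):

```python
# Same decision logic as set_num_cpus, with cpu_count() made a parameter.
def pick(num_files, processes, num_cpus):
    if num_files < num_cpus and num_files < processes:
        return num_files
    if num_cpus < processes:
        half_cpus = int(num_cpus / 2)
        if num_files < half_cpus:
            return num_files
        return half_cpus
    return processes

print(pick(2, 8, 8))    # 2 -> fewer files than CPUs and requested processes
print(pick(50, 16, 8))  # 4 -> request exceeds CPUs, fall back to half of them
print(pick(50, 8, 16))  # 8 -> the requested process count stands
```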
```python
if __name__ == '__main__':
    parser = argparse.ArgumentParser()

    parser.add_argument('--input_avg_mq_json', action='store', dest='input_avg_mq_json', required=False, default=None, help='Average MQ json file')
    parser.add_argument('--input_newick', action='store', dest='input_newick', required=False, default=None, help='Newick file')
    parser.add_argument('--input_snps_json', action='store', dest='input_snps_json', required=False, default=None, help='SNPs json file')
    parser.add_argument('--gbk_file', action='store', dest='gbk_file', required=False, default=None, help='Optional gbk file')
    parser.add_argument('--processes', action='store', dest='processes', type=int, help='User-selected number of processes to use for job splitting')

    args = parser.parse_args()

    if args.gbk_file is not None:
        # Create the annotation_dict for annotating
        # the Excel tables.
        annotation_dict = get_annotation_dict(args.gbk_file)
    else:
        annotation_dict = None

    # The assumption here is that the lists of files
    # in INPUT_NEWICK_DIR and INPUT_JSON_DIR are
    # named such that they are properly matched if
    # the directories contain more than 1 file (i.e.,
    # hopefully the newick file names and json file names
    # will be something like Mbovis-01D6_* so they can be
    # sorted and properly associated with each other).
    if args.input_newick is not None:
        newick_files = [args.input_newick]
    else:
        newick_files = []
        for file_name in sorted(os.listdir(INPUT_NEWICK_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_NEWICK_DIR, file_name))
            newick_files.append(file_path)
    if args.input_snps_json is not None:
        json_files = [args.input_snps_json]
    else:
        json_files = []
        for file_name in sorted(os.listdir(INPUT_JSON_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_JSON_DIR, file_name))
            json_files.append(file_path)
    if args.input_avg_mq_json is not None:
        json_avg_mq_files = [args.input_avg_mq_json]
    else:
        json_avg_mq_files = []
        for file_name in sorted(os.listdir(INPUT_JSON_AVG_MQ_DIR)):
            file_path = os.path.abspath(os.path.join(INPUT_JSON_AVG_MQ_DIR, file_name))
            json_avg_mq_files.append(file_path)

    multiprocessing.set_start_method('spawn')
    queue1 = multiprocessing.JoinableQueue()
    num_files = len(newick_files)
    cpus = set_num_cpus(num_files, args.processes)
    # Set a timeout for get()s in the queue.
    timeout = 0.05

    for i, newick_file in enumerate(newick_files):
        json_file = json_files[i]
        json_avg_mq_file = json_avg_mq_files[i]
        queue1.put((newick_file, json_file, json_avg_mq_file))

    # Complete the preprocess_tables task.
    processes = [multiprocessing.Process(target=preprocess_tables, args=(queue1, annotation_dict, timeout, )) for _ in range(cpus)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    queue1.join()

    if queue1.empty():
        queue1.close()
        queue1.join_thread()
```
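Finally, a hedged sketch of a single-file run outside Galaxy (placeholder file names; note the script writes into output_excel_dir, which must exist before the run):

```python
import os
import subprocess

os.makedirs('output_excel_dir', exist_ok=True)
subprocess.run([
    'python', 'vsnp_build_tables.py',
    '--input_newick', 'Mbovis-01D6.newick',           # placeholder inputs
    '--input_snps_json', 'Mbovis-01D6_snps.json',
    '--input_avg_mq_json', 'Mbovis-01D6_avg_mq.json',
    '--processes', '1',
], check=True)
```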
