comparison fsd.py @ 16:6bd9ef49d013 draft
planemo upload for repository https://github.com/monikaheinzl/duplexanalysis_galaxy/tree/master/tools/fsd commit dfaab79252a858e8df16bbea3607ebf1b6962e5a

author      mheinzl
date        Mon, 08 Oct 2018 05:50:18 -0400
parents
children    2e517a54eedc
comparison: 15:32921a67437b | 16:6bd9ef49d013
#!/usr/bin/env python

# Family size distribution of SSCSs
#
# Author: Monika Heinzl, Johannes-Kepler University Linz (Austria)
# Contact: monika.heinzl@edumail.at
#
# Takes at least one TABULAR file with tags before the alignment to the SSCS as input; up to 4 files can be provided.
# The program produces a plot which shows the distribution of family sizes of all SSCSs from the input files and
# a tabular file with the data of the plot, as well as a TXT file with all tags of the DCS and their family sizes.
# If only one file is provided, a family size distribution separated into SSCSs without a partner and DCSs is produced,
# whereas a family size distribution with multiple datasets in one plot is produced when more than one file (up to 4) is given.

# USAGE: python FSD_Galaxy_1.4_commandLine_FINAL.py --inputFile1 filename --inputName1 filename --inputFile2 filename2 --inputName2 filename2 --inputFile3 filename3 --inputName3 filename3 --inputFile4 filename4 --inputName4 filename4 --output_tabular outputfile_name_tabular --output_pdf outputfile_name_pdf
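# Hypothetical input sketch (assumed, not taken from the repository): three tab-separated columns,
# read below as column 1 = family size, column 2 = tag sequence, column 3 = ab/ba;
# the tag sequences here are made-up placeholders:
#
#   10    AAAAAAAAGGGGGGGG    ab
#   8     AAAAAAAAGGGGGGGG    ba
#   1     TTTTTTTTCCCCCCCC    ab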

import argparse
import sys

import matplotlib.pyplot as plt
import numpy
from matplotlib.backends.backend_pdf import PdfPages

plt.switch_backend('agg')


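# Read the whole tab-separated tag file into a numpy string array; lines starting with '#' are skipped.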
def readFileReferenceFree(file):
    with open(file, 'r') as dest_f:
        data_array = numpy.genfromtxt(dest_f, skip_header=0, delimiter='\t', comments='#', dtype='string')
    return data_array


def make_argparser():
    parser = argparse.ArgumentParser(description='Family Size Distribution of duplex sequencing data')
    parser.add_argument('--inputFile1', help='Tabular File with three columns: ab or ba, tag and family size.')
    parser.add_argument('--inputName1')
    parser.add_argument('--inputFile2', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
    parser.add_argument('--inputName2')
    parser.add_argument('--inputFile3', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
    parser.add_argument('--inputName3')
    parser.add_argument('--inputFile4', default=None, help='Tabular File with three columns: ab or ba, tag and family size.')
    parser.add_argument('--inputName4')
    parser.add_argument('--output_pdf', default="data.pdf", type=str, help='Name of the pdf file.')
    parser.add_argument('--output_tabular', default="data.tabular", type=str, help='Name of the tabular file.')
    return parser


def compare_read_families(argv):
    parser = make_argparser()
    args = parser.parse_args(argv[1:])

    firstFile = args.inputFile1
    name1 = args.inputName1

    secondFile = args.inputFile2
    name2 = args.inputName2
    thirdFile = args.inputFile3
    name3 = args.inputName3
    fourthFile = args.inputFile4
    name4 = args.inputName4

    title_file = args.output_tabular
    title_file2 = args.output_pdf

    sep = "\t"

    plt.rc('figure', figsize=(11.69, 8.27))  # A4 format
    plt.rcParams['patch.edgecolor'] = "black"
    plt.rcParams['axes.facecolor'] = "E0E0E0"  # grey background color
    plt.rcParams['xtick.labelsize'] = 14
    plt.rcParams['ytick.labelsize'] = 14

    list_to_plot = []
    label = []
    data_array_list = []
    with open(title_file, "w") as output_file, PdfPages(title_file2) as pdf:
        fig = plt.figure()
        plt.subplots_adjust(bottom=0.25)
        if firstFile != str(None):
            file1 = readFileReferenceFree(firstFile)
            integers = numpy.array(file1[:, 0]).astype(int)  # keep original family sizes

            # for plot: replace all big family sizes by 22
            data1 = numpy.array(file1[:, 0]).astype(int)
            bigFamilies = numpy.where(data1 > 20)[0]
            data1[bigFamilies] = 22

            name1 = name1.split(".tabular")[0]
            list_to_plot.append(data1)
            label.append(name1)
            data_array_list.append(file1)

            legend = "\n\n\n{}".format(name1)
            plt.text(0.1, 0.11, legend, size=12, transform=plt.gcf().transFigure)
            legend1 = "singletons:\nabsolute nr.\n{:,}".format(numpy.bincount(data1)[1])
            plt.text(0.4, 0.11, legend1, size=12, transform=plt.gcf().transFigure)

            legend3 = "rel. freq\n{:.3f}".format(float(numpy.bincount(data1)[1]) / len(data1))
            plt.text(0.5, 0.11, legend3, size=12, transform=plt.gcf().transFigure)

            legend4 = "family size > 20:\nabsolute nr.\n{:,}".format(numpy.bincount(data1)[len(numpy.bincount(data1)) - 1].astype(int))
            plt.text(0.6, 0.11, legend4, size=12, transform=plt.gcf().transFigure)

            legend5 = "rel. freq\n{:.3f}".format(float(numpy.bincount(data1)[len(numpy.bincount(data1)) - 1]) / len(data1))
            plt.text(0.7, 0.11, legend5, size=12, transform=plt.gcf().transFigure)

            legend6 = "total length\n{:,}".format(len(data1))
            plt.text(0.8, 0.11, legend6, size=12, transform=plt.gcf().transFigure)

        if secondFile != str(None):
            file2 = readFileReferenceFree(secondFile)
            data2 = numpy.asarray(file2[:, 0]).astype(int)
            bigFamilies2 = numpy.where(data2 > 20)[0]
            data2[bigFamilies2] = 22

            list_to_plot.append(data2)
            name2 = name2.split(".tabular")[0]
            label.append(name2)
            data_array_list.append(file2)

            plt.text(0.1, 0.09, name2, size=12, transform=plt.gcf().transFigure)

            legend1 = "{:,}".format(numpy.bincount(data2)[1])
            plt.text(0.4, 0.09, legend1, size=12, transform=plt.gcf().transFigure)

            legend3 = "{:.3f}".format(float(numpy.bincount(data2)[1]) / len(data2))
            plt.text(0.5, 0.09, legend3, size=12, transform=plt.gcf().transFigure)

            legend4 = "{:,}".format(numpy.bincount(data2)[len(numpy.bincount(data2)) - 1].astype(int))
            plt.text(0.6, 0.09, legend4, size=12, transform=plt.gcf().transFigure)

            legend5 = "{:.3f}".format(float(numpy.bincount(data2)[len(numpy.bincount(data2)) - 1]) / len(data2))
            plt.text(0.7, 0.09, legend5, size=12, transform=plt.gcf().transFigure)

            legend6 = "{:,}".format(len(data2))
            plt.text(0.8, 0.09, legend6, size=12, transform=plt.gcf().transFigure)

        if thirdFile != str(None):
            file3 = readFileReferenceFree(thirdFile)

            data3 = numpy.asarray(file3[:, 0]).astype(int)
            bigFamilies3 = numpy.where(data3 > 20)[0]
            data3[bigFamilies3] = 22

            list_to_plot.append(data3)
            name3 = name3.split(".tabular")[0]
            label.append(name3)
            data_array_list.append(file3)

            plt.text(0.1, 0.07, name3, size=12, transform=plt.gcf().transFigure)

            legend1 = "{:,}".format(numpy.bincount(data3)[1])
            plt.text(0.4, 0.07, legend1, size=12, transform=plt.gcf().transFigure)

            legend3 = "{:.3f}".format(float(numpy.bincount(data3)[1]) / len(data3))
            plt.text(0.5, 0.07, legend3, size=12, transform=plt.gcf().transFigure)

            legend4 = "{:,}".format(numpy.bincount(data3)[len(numpy.bincount(data3)) - 1].astype(int))
            plt.text(0.6, 0.07, legend4, size=12, transform=plt.gcf().transFigure)

            legend5 = "{:.3f}".format(float(numpy.bincount(data3)[len(numpy.bincount(data3)) - 1]) / len(data3))
            plt.text(0.7, 0.07, legend5, size=12, transform=plt.gcf().transFigure)

            legend6 = "{:,}".format(len(data3))
            plt.text(0.8, 0.07, legend6, size=12, transform=plt.gcf().transFigure)

        if fourthFile != str(None):
            file4 = readFileReferenceFree(fourthFile)

            data4 = numpy.asarray(file4[:, 0]).astype(int)

            bigFamilies4 = numpy.where(data4 > 20)[0]
            data4[bigFamilies4] = 22

            list_to_plot.append(data4)
            name4 = name4.split(".tabular")[0]
            label.append(name4)
            data_array_list.append(file4)

            plt.text(0.1, 0.05, name4, size=12, transform=plt.gcf().transFigure)

            legend1 = "{:,}".format(numpy.bincount(data4)[1])
            plt.text(0.4, 0.05, legend1, size=12, transform=plt.gcf().transFigure)

            legend3 = "{:.3f}".format(float(numpy.bincount(data4)[1]) / len(data4))
            plt.text(0.5, 0.05, legend3, size=12, transform=plt.gcf().transFigure)

            legend4 = "{:,}".format(numpy.bincount(data4)[len(numpy.bincount(data4)) - 1].astype(int))
            plt.text(0.6, 0.05, legend4, size=12, transform=plt.gcf().transFigure)

            legend5 = "{:.3f}".format(float(numpy.bincount(data4)[len(numpy.bincount(data4)) - 1]) / len(data4))
            plt.text(0.7, 0.05, legend5, size=12, transform=plt.gcf().transFigure)

            legend6 = "{:,}".format(len(data4))
            plt.text(0.8, 0.05, legend6, size=12, transform=plt.gcf().transFigure)

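        # combined family size distribution of all provided datasets; family sizes > 20 were capped at 22 above and end up in the ">20" bin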
        maximumX = numpy.amax(numpy.concatenate(list_to_plot))
        minimumX = numpy.amin(numpy.concatenate(list_to_plot))

        counts = plt.hist(list_to_plot, bins=range(minimumX, maximumX + 1), stacked=False, edgecolor="black",
                          linewidth=1, label=label, align="left", alpha=0.7, rwidth=0.8)

        ticks = numpy.arange(minimumX - 1, maximumX, 1)
        ticks1 = map(str, ticks)
        ticks1[len(ticks1) - 1] = ">20"
        plt.xticks(numpy.array(ticks), ticks1)

        plt.legend(loc='upper right', fontsize=14, frameon=True, bbox_to_anchor=(0.9, 1))
        # plt.title("Family Size Distribution", fontsize=14)
        plt.xlabel("Family size", fontsize=14)
        plt.ylabel("Absolute Frequency", fontsize=14)
        plt.margins(0.01, None)
        plt.grid(b=True, which="major", color="#424242", linestyle=":")
        pdf.savefig(fig)
        plt.close()

        # write data to CSV file
        output_file.write("Values from family size distribution with all datasets\n")
        output_file.write("\nFamily size")
        for i in label:
            output_file.write("{}{}".format(sep, i))
        # output_file.write("{}sum".format(sep))
        output_file.write("\n")
        j = 0
        for fs in counts[1][0:len(counts[1]) - 1]:
            if fs == 21:
                fs = ">20"
            else:
                fs = "={}".format(fs)
            output_file.write("FS{}{}".format(fs, sep))
            if len(label) == 1:
                output_file.write("{}{}".format(int(counts[0][j]), sep))
            else:
                for n in range(len(label)):
                    output_file.write("{}{}".format(int(counts[0][n][j]), sep))
            output_file.write("\n")
            j += 1
        output_file.write("sum{}".format(sep))
        if len(label) == 1:
            output_file.write("{}{}".format(int(sum(counts[0])), sep))
        else:
            for i in counts[0]:
                output_file.write("{}{}".format(int(sum(i)), sep))

        # Family size distribution after DCS and SSCS
        for dataset, data, name_file in zip(list_to_plot, data_array_list, label):
            maximumX = numpy.amax(dataset)
            minimumX = numpy.amin(dataset)

            tags = numpy.array(data[:, 2])
            seq = numpy.array(data[:, 1])
            integers = numpy.array(data[:, 0]).astype(int)  # original (uncapped) family sizes of this dataset
            data = numpy.array(dataset)

            # find all unique tags and get the indices for ALL tags, but only once
            u, index_unique, c = numpy.unique(numpy.array(seq), return_counts=True, return_index=True)
            d = u[c > 1]

            # get family sizes, tag for duplicates
            duplTags_double = data[numpy.in1d(seq, d)]
            duplTags = duplTags_double[0::2]  # ab of DCS
            duplTagsBA = duplTags_double[1::2]  # ba of DCS

            # duplTags_double_tag = tags[numpy.in1d(seq, d)]
            # duplTags_double_seq = seq[numpy.in1d(seq, d)]

            # get family sizes for SSCS with no partner
            ab = numpy.where(tags == "ab")[0]
            abSeq = seq[ab]
            ab = data[ab]
            ba = numpy.where(tags == "ba")[0]
            baSeq = seq[ba]
            ba = data[ba]

            dataAB = ab[numpy.in1d(abSeq, d, invert=True)]
            dataBA = ba[numpy.in1d(baSeq, d, invert=True)]

            list1 = [duplTags_double, dataAB, dataBA]  # list for plotting

            # information for family size >= 3
            dataAB_FS3 = dataAB[dataAB >= 3]
            dataBA_FS3 = dataBA[dataBA >= 3]
            ab_FS3 = ab[ab >= 3]
            ba_FS3 = ba[ba >= 3]

            duplTags_FS3 = duplTags[(duplTags >= 3) & (duplTagsBA >= 3)]  # ab+ba with FS>=3
            duplTags_FS3_BA = duplTagsBA[(duplTags >= 3) & (duplTagsBA >= 3)]  # ba+ab with FS>=3
            duplTags_double_FS3 = len(duplTags_FS3) + len(duplTags_FS3_BA)  # both ab and ba strands with FS>=3

            fig = plt.figure()

            plt.subplots_adjust(bottom=0.3)
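            # stacked histogram per dataset: DCS families (ab and ba with a partner) plus unpaired ab-only and ba-only SSCS families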
            counts = plt.hist(list1, bins=range(minimumX, maximumX + 1), stacked=True, label=["duplex", "ab", "ba"], edgecolor="black", linewidth=1, align="left", color=["#FF0000", "#5FB404", "#FFBF00"])
            # tick labels of x axis
            ticks = numpy.arange(minimumX - 1, maximumX, 1)
            ticks1 = map(str, ticks)
            ticks1[len(ticks1) - 1] = ">20"
            plt.xticks(numpy.array(ticks), ticks1)
            singl = counts[0][2][0]  # singletons
            last = counts[0][2][len(counts[0][0]) - 1]  # large families

            plt.legend(loc='upper right', fontsize=14, bbox_to_anchor=(0.9, 1), frameon=True)
            # plt.title(name1, fontsize=14)
            plt.xlabel("Family size", fontsize=14)
            plt.ylabel("Absolute Frequency", fontsize=14)
            plt.margins(0.01, None)
            plt.grid(b=True, which="major", color="#424242", linestyle=":")

            # extra information beneath the plot
            legend = "SSCS ab= \nSSCS ba= \nDCS (total)= \nlength of dataset="
            plt.text(0.1, 0.09, legend, size=12, transform=plt.gcf().transFigure)

            legend = "absolute numbers\n\n{:,}\n{:,}\n{:,} ({:,})\n{:,}".format(len(dataAB), len(dataBA), len(duplTags), len(duplTags_double), (len(dataAB) + len(dataBA) + len(duplTags)))
            plt.text(0.35, 0.09, legend, size=12, transform=plt.gcf().transFigure)

            legend = "relative frequencies\nunique\n{:.3f}\n{:.3f}\n{:.3f}\n{:,}".format(float(len(dataAB)) / (len(dataAB) + len(dataBA) + len(duplTags)), float(len(dataBA)) / (len(dataAB) + len(dataBA) + len(duplTags)), float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)), (len(dataAB) + len(dataBA) + len(duplTags)))
            plt.text(0.54, 0.09, legend, size=12, transform=plt.gcf().transFigure)

            legend = "total\n{:.3f}\n{:.3f}\n{:.3f} ({:.3f})\n{:,}".format(float(len(dataAB)) / (len(ab) + len(ba)), float(len(dataBA)) / (len(ab) + len(ba)), float(len(duplTags)) / (len(ab) + len(ba)), float(len(duplTags_double)) / (len(ab) + len(ba)), (len(ab) + len(ba)))
            plt.text(0.64, 0.09, legend, size=12, transform=plt.gcf().transFigure)

            legend1 = "\nsingletons:\nfamily size > 20:"
            plt.text(0.1, 0.03, legend1, size=12, transform=plt.gcf().transFigure)

            legend4 = "{:,}\n{:,}".format(singl.astype(int), last.astype(int))
            plt.text(0.35, 0.03, legend4, size=12, transform=plt.gcf().transFigure)

            legend3 = "{:.3f}\n{:.3f}".format(singl / len(data), last / len(data))
            plt.text(0.54, 0.03, legend3, size=12, transform=plt.gcf().transFigure)

            pdf.savefig(fig)
            plt.close()

            # write same information to a csv file
            count = numpy.bincount(integers)  # original counts of family sizes
            output_file.write("\nDataset:{}{}\n".format(sep, name_file))
            output_file.write("max. family size:{}{}\n".format(sep, max(integers)))
            output_file.write("absolute frequency:{}{}\n".format(sep, count[len(count) - 1]))
            output_file.write("relative frequency:{}{:.3f}\n\n".format(sep, float(count[len(count) - 1]) / sum(count)))

            output_file.write("{}singletons:{}{}family size > 20:\n".format(sep, sep, sep))
            output_file.write("{}absolute nr.{}rel. freq{}absolute nr.{}rel. freq{}total length\n".format(sep, sep, sep, sep, sep))
            output_file.write("{}{}{}{}{:.3f}{}{}{}{:.3f}{}{}\n\n".format(name_file, sep, singl.astype(int), sep, singl / len(data), sep, last.astype(int), sep, last / len(data), sep, len(data)))

            # information for FS >= 1
344 output_file.write("The unique frequencies were calculated from the dataset where the tags occured only once (=ab without DCS, ba without DCS)\nWhereas the total frequencies were calculated from the whole dataset (=including the DCS).\n\n") | |
345 output_file.write("FS >= 1{}{}unique:{}total:\n".format(sep, sep, sep)) | |
346 output_file.write("nr./rel. freq of ab={}{}{}{:.3f}{}{:.3f}\n".format(sep, len(dataAB), sep, float(len(dataAB)) / (len(dataAB) + len(dataBA) + len( duplTags)), sep, float(len(dataAB)) / (len(ab) + len(ba)))) | |
347 output_file.write("nr./rel. freq of ba={}{}{}{:.3f}{}{:.3f}\n".format(sep, len(dataBA), sep, float(len(dataBA)) / (len(dataBA) + len(dataBA) + len(duplTags)), sep, float(len(dataBA)) / (len(ba) + len(ba)))) | |
348 output_file.write("nr./rel. freq of DCS (total)={}{} ({}){}{:.3f}{}{:.3f} ({:.3f})\n".format(sep, len(duplTags), len(duplTags_double), sep, float(len(duplTags)) / (len(dataAB) + len(dataBA) + len(duplTags)), sep, float(len(duplTags)) / ( len(ab) + len(ba)), float(len(duplTags_double)) / (len(ab) + len(ba)))) | |
349 output_file.write("length of dataset={}{}{}{}{}{}\n".format(sep, (len(dataAB) + len(dataBA) + len(duplTags)), sep, (len(dataAB) + len(dataBA) + len(duplTags)), sep, (len(ab) + len(ba)))) | |
350 # information for FS >= 3 | |
351 output_file.write("FS >= 3{}{}unique:{}total:\n".format(sep, sep, sep)) | |
352 output_file.write("nr./rel. freq of ab={}{}{}{:.3f}{}{:.3f}\n".format(sep, len(dataAB_FS3), sep, float(len(dataAB_FS3)) / (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep, float(len(dataAB_FS3)) / (len(ab_FS3) + len(ba_FS3)))) | |
353 output_file.write("nr./rel. freq of ba={}{}{}{:.3f}{}{:.3f}\n".format(sep, len(dataBA_FS3), sep, float(len(dataBA_FS3)) / (len(dataBA_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep, float(len(dataBA_FS3)) / (len(ba_FS3) + len(ba_FS3)))) | |
354 output_file.write("nr./rel. freq of DCS (total)={}{} ({}){}{:.3f}{}{:.3f} ({:.3f})\n".format(sep, len(duplTags_FS3), duplTags_double_FS3, sep, float(len( duplTags_FS3)) / (len(dataBA_FS3) + len(duplTags_FS3)), sep, float(len(duplTags_FS3)) / (len(ab_FS3) + len(ba_FS3)), float(duplTags_double_FS3) / (len(ab_FS3) + len(ba_FS3)))) | |
355 output_file.write("length of dataset={}{}{}{}{}{}\n".format(sep, (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep, (len(dataAB_FS3) + len(dataBA_FS3) + len(duplTags_FS3)), sep, (len(ab_FS3) + len(ba_FS3)))) | |

            output_file.write("\nValues from family size distribution\n")
            output_file.write("{}duplex{}ab{}ba{}sum\n".format(sep, sep, sep, sep))
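            # counts of the stacked histogram are cumulative (duplex, duplex+ab, duplex+ab+ba), so subtract to get the per-category values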
            for dx, ab, ba, fs in zip(counts[0][0], counts[0][1], counts[0][2], counts[1]):
                if fs == 21:
                    fs = ">20"
                else:
                    fs = "={}".format(fs)
                ab1 = ab - dx
                ba1 = ba - ab
                output_file.write("FS{}{}{}{}{}{}{}{}{}\n".format(fs, sep, int(dx), sep, int(ab1), sep, int(ba1), sep, int(ba)))

    print("Files successfully created!")


if __name__ == '__main__':
    sys.exit(compare_read_families(sys.argv))