Mercurial repository: davidvanzessen / argalaxy_tools
File: report_clonality/RScript.r~ @ changeset 26:28fbbdfd7a87 (draft)
Commit message: Uploaded
Author: davidvanzessen
Date: Mon, 13 Feb 2017 09:08:46 -0500
# ---------------------- load/install packages ----------------------

if (!("gridExtra" %in% rownames(installed.packages()))) {
  install.packages("gridExtra", repos="http://cran.xl-mirror.nl/")
}
library(gridExtra)
if (!("ggplot2" %in% rownames(installed.packages()))) {
  install.packages("ggplot2", repos="http://cran.xl-mirror.nl/")
}
library(ggplot2)
if (!("plyr" %in% rownames(installed.packages()))) {
  install.packages("plyr", repos="http://cran.xl-mirror.nl/")
}
library(plyr)

if (!("data.table" %in% rownames(installed.packages()))) {
  install.packages("data.table", repos="http://cran.xl-mirror.nl/")
}
library(data.table)

if (!("reshape2" %in% rownames(installed.packages()))) {
  install.packages("reshape2", repos="http://cran.xl-mirror.nl/")
}
library(reshape2)

if (!("lymphclon" %in% rownames(installed.packages()))) {
  install.packages("lymphclon", repos="http://cran.xl-mirror.nl/")
}
library(lymphclon)

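# The six install-or-load blocks above could be collapsed into one helper.
# A minimal sketch, not part of the original pipeline (same CRAN mirror assumed):
# load_or_install <- function(pkg) {
#   if (!(pkg %in% rownames(installed.packages()))) {
#     install.packages(pkg, repos="http://cran.xl-mirror.nl/")
#   }
#   library(pkg, character.only=TRUE)
# }
# invisible(lapply(c("gridExtra", "ggplot2", "plyr", "data.table", "reshape2", "lymphclon"), load_or_install))
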
# ---------------------- parameters ----------------------

args <- commandArgs(trailingOnly = TRUE)

infile = args[1] #path to input file
outfile = args[2] #path to output file
outdir = args[3] #path to output folder (html/images/data)
clonaltype = args[4] #clonaltype definition, or 'none' for no unique filtering
ct = unlist(strsplit(clonaltype, ","))
species = args[5] #human or mouse
locus = args[6] # IGH, IGK, IGL, TRB, TRA, TRG or TRD
filterproductive = ifelse(args[7] == "yes", T, F) #should unproductive sequences be filtered out? (yes/no)
clonality_method = args[8]

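#example invocation (file names and clonal type columns are hypothetical, for illustration only):
#Rscript RScript.r merged_summary.txt report.html outdir/ Top.V.Gene,Top.J.Gene,CDR3.Seq human IGH yes boyd
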
# ---------------------- Data preparation ----------------------

inputdata = read.table(infile, sep="\t", header=TRUE, fill=T, comment.char="")

setwd(outdir)

# remove rows without a sample name
inputdata = inputdata[inputdata$Sample != "",]

#remove the allele suffix (e.g. *01) from the V, D and J genes
inputdata$Top.V.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.V.Gene)
inputdata$Top.D.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.D.Gene)
inputdata$Top.J.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.J.Gene)

inputdata$clonaltype = 1:nrow(inputdata)
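#default clonal type: the row index, so every sequence counts as unique until a real
#clonal type definition (comma-separated column names) is applied below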

PRODF = inputdata
UNPROD = inputdata
if(filterproductive){
  if("Functionality" %in% colnames(inputdata)) { # "Functionality" is an IMGT column
    PRODF = inputdata[inputdata$Functionality == "productive" | inputdata$Functionality == "productive (see comment)", ]
    UNPROD = inputdata[!(inputdata$Functionality == "productive" | inputdata$Functionality == "productive (see comment)"), ]
  } else {
    PRODF = inputdata[inputdata$VDJ.Frame != "In-frame with stop codon" & inputdata$VDJ.Frame != "Out-of-frame" & inputdata$CDR3.Found.How != "NOT_FOUND" , ]
    UNPROD = inputdata[!(inputdata$VDJ.Frame != "In-frame with stop codon" & inputdata$VDJ.Frame != "Out-of-frame" & inputdata$CDR3.Found.How != "NOT_FOUND" ), ]
  }
}

clonalityFrame = PRODF

#remove duplicates based on the clonaltype
if(clonaltype != "none"){
  clonaltype = paste(clonaltype, ",Sample", sep="") #add sample column to clonaltype, unique within samples
  PRODF$clonaltype = do.call(paste, c(PRODF[unlist(strsplit(clonaltype, ","))], sep = ":"))
  PRODF = PRODF[!duplicated(PRODF$clonaltype), ]

  UNPROD$clonaltype = do.call(paste, c(UNPROD[unlist(strsplit(clonaltype, ","))], sep = ":"))
  UNPROD = UNPROD[!duplicated(UNPROD$clonaltype), ]

  #again for clonalityFrame but with sample+replicate
  clonalityFrame$clonaltype = do.call(paste, c(clonalityFrame[unlist(strsplit(clonaltype, ","))], sep = ":"))
  clonalityFrame$clonality_clonaltype = do.call(paste, c(clonalityFrame[unlist(strsplit(paste(clonaltype, ",Replicate", sep=""), ","))], sep = ":"))
  clonalityFrame = clonalityFrame[!duplicated(clonalityFrame$clonality_clonaltype), ]
}
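#the clonal type key is the selected columns pasted together with ":", plus Sample
#(and Replicate for clonalityFrame); e.g. a definition of "Top.V.Gene,Top.J.Gene,CDR3.Seq"
#gives keys like "IGHV3-23:IGHJ4:ARDYW:sample1" (the gene/CDR3/sample values here are made up)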

PRODF$freq = 1

if(any(grepl(pattern="_", x=PRODF$ID))){ #the frequency can be stored in the ID as the second underscore-separated field, e.g. "123_42_rest" means a frequency of 42
  PRODF$freq = gsub("^[0-9]+_", "", PRODF$ID)
  PRODF$freq = gsub("_.*", "", PRODF$freq)
  PRODF$freq = as.numeric(PRODF$freq)
  if(any(is.na(PRODF$freq))){ #if there was an "_" in the ID but no numeric frequency, fall back to a frequency of 1 for every sequence
    PRODF$freq = 1
  }
}

#write the remaining (filtered/deduplicated) dataset; this equals the full input when clonaltype is 'none' and filterproductive is 'no'
write.table(PRODF, "allUnique.txt", sep=",",quote=F,row.names=F,col.names=T)
write.table(PRODF, "allUnique.csv", sep="\t",quote=F,row.names=F,col.names=T)
write.table(UNPROD, "allUnproductive.csv", sep=",",quote=F,row.names=F,col.names=T)

#write the samples to a file
sampleFile <- file("samples.txt")
un = unique(inputdata$Sample)
un = paste(un, sep="\n")
writeLines(un, sampleFile)
close(sampleFile)

# ---------------------- Counting the productive/unproductive and unique sequences ----------------------

if(!("Functionality" %in% colnames(inputdata))){ #add a functionality column to the igblast data
  inputdata$Functionality = "unproductive"
  search = (inputdata$VDJ.Frame != "In-frame with stop codon" & inputdata$VDJ.Frame != "Out-of-frame" & inputdata$CDR3.Found.How != "NOT_FOUND")
  if(sum(search) > 0){
    inputdata[search,]$Functionality = "productive"
  }
}

inputdata.dt = data.table(inputdata) #for speed

if(clonaltype == "none"){
  ct = c("clonaltype")
}
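#with no clonal type given, ct falls back to the per-row 'clonaltype' index set above,
#so the "unique" counts below simply equal the total counts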

inputdata.dt$samples_replicates = paste(inputdata.dt$Sample, inputdata.dt$Replicate, sep="_")
samples_replicates = c(unique(inputdata.dt$samples_replicates), unique(as.character(inputdata.dt$Sample)))
frequency_table = data.frame(ID = samples_replicates[order(samples_replicates)])

sample_productive_count = inputdata.dt[, list(All=.N,
                                              Productive = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",]),
                                              perc_prod = 1,
                                              Productive_unique = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",list(count=.N),by=ct]),
                                              perc_prod_un = 1,
                                              Unproductive = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",]),
                                              perc_unprod = 1,
                                              Unproductive_unique = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",list(count=.N),by=ct]),
                                              perc_unprod_un = 1),
                                        by=c("Sample")]

sample_productive_count$perc_prod = round(sample_productive_count$Productive / sample_productive_count$All * 100)
sample_productive_count$perc_prod_un = round(sample_productive_count$Productive_unique / sample_productive_count$All * 100)

sample_productive_count$perc_unprod = round(sample_productive_count$Unproductive / sample_productive_count$All * 100)
sample_productive_count$perc_unprod_un = round(sample_productive_count$Unproductive_unique / sample_productive_count$All * 100)


sample_replicate_productive_count = inputdata.dt[, list(All=.N,
                                                        Productive = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",]),
                                                        perc_prod = 1,
                                                        Productive_unique = nrow(.SD[.SD$Functionality == "productive" | .SD$Functionality == "productive (see comment)",list(count=.N),by=ct]),
                                                        perc_prod_un = 1,
                                                        Unproductive = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",]),
                                                        perc_unprod = 1,
                                                        Unproductive_unique = nrow(.SD[.SD$Functionality != "productive" & .SD$Functionality != "productive (see comment)",list(count=.N),by=ct]),
                                                        perc_unprod_un = 1),
                                                  by=c("samples_replicates")]

sample_replicate_productive_count$perc_prod = round(sample_replicate_productive_count$Productive / sample_replicate_productive_count$All * 100)
sample_replicate_productive_count$perc_prod_un = round(sample_replicate_productive_count$Productive_unique / sample_replicate_productive_count$All * 100)

sample_replicate_productive_count$perc_unprod = round(sample_replicate_productive_count$Unproductive / sample_replicate_productive_count$All * 100)
sample_replicate_productive_count$perc_unprod_un = round(sample_replicate_productive_count$Unproductive_unique / sample_replicate_productive_count$All * 100)

setnames(sample_replicate_productive_count, colnames(sample_productive_count))

counts = rbind(sample_replicate_productive_count, sample_productive_count)
counts = counts[order(counts$Sample),]

write.table(x=counts, file="productive_counting.txt", sep=",",quote=F,row.names=F,col.names=F)
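#counts now holds one row per sample_replicate plus one overall row per sample (ordered on
#the first column), with rounded percentages of (unique) productive and unproductive sequences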

# ---------------------- Frequency calculation for V, D and J ----------------------

PRODFV = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.V.Gene")])
Total = ddply(PRODFV, .(Sample), function(x) data.frame(Total = sum(x$Length)))
PRODFV = merge(PRODFV, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
PRODFV = ddply(PRODFV, c("Sample", "Top.V.Gene"), summarise, relFreq= (Length*100 / Total))

PRODFD = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.D.Gene")])
Total = ddply(PRODFD, .(Sample), function(x) data.frame(Total = sum(x$Length)))
PRODFD = merge(PRODFD, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
PRODFD = ddply(PRODFD, c("Sample", "Top.D.Gene"), summarise, relFreq= (Length*100 / Total))

PRODFJ = data.frame(data.table(PRODF)[, list(Length=sum(freq)), by=c("Sample", "Top.J.Gene")])
Total = ddply(PRODFJ, .(Sample), function(x) data.frame(Total = sum(x$Length)))
PRODFJ = merge(PRODFJ, Total, by.x='Sample', by.y='Sample', all.x=TRUE)
PRODFJ = ddply(PRODFJ, c("Sample", "Top.J.Gene"), summarise, relFreq= (Length*100 / Total))

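#relFreq is the percentage of (frequency-weighted) reads per gene within each sample:
#for example, a gene with Length 10 in a sample with Total 200 gets relFreq 10*100/200 = 5
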
# ---------------------- Setting up the gene names for the different species/loci ----------------------

Vchain = ""
Dchain = ""
Jchain = ""

if(species == "custom"){
  print("Custom genes: ")
  splt = unlist(strsplit(locus, ";"))
  print(paste("V:", splt[1]))
  print(paste("D:", splt[2]))
  print(paste("J:", splt[3]))

  Vchain = unlist(strsplit(splt[1], ","))
  Vchain = data.frame(v.name = Vchain, chr.orderV = 1:length(Vchain))

  Dchain = unlist(strsplit(splt[2], ","))
  if(length(Dchain) > 0){
    Dchain = data.frame(v.name = Dchain, chr.orderD = 1:length(Dchain))
  } else {
    Dchain = data.frame(v.name = character(0), chr.orderD = numeric(0))
  }

  Jchain = unlist(strsplit(splt[3], ","))
  Jchain = data.frame(v.name = Jchain, chr.orderJ = 1:length(Jchain))

} else {
  genes = read.table("genes.txt", sep="\t", header=TRUE, fill=T, comment.char="")

  Vchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "V",c("IMGT.GENE.DB", "chr.order")]
  colnames(Vchain) = c("v.name", "chr.orderV")
  Dchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "D",c("IMGT.GENE.DB", "chr.order")]
  colnames(Dchain) = c("v.name", "chr.orderD")
  Jchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "J",c("IMGT.GENE.DB", "chr.order")]
  colnames(Jchain) = c("v.name", "chr.orderJ")
}
useD = TRUE
if(nrow(Dchain) == 0){
  useD = FALSE
  cat("No D Genes in this species/locus")
}
print(paste("useD:", useD))
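#note: genes.txt, read above from the working directory (presumably placed there by the
#calling tool), is expected to contain the columns Species, locus, region, IMGT.GENE.DB and
#chr.order; chr.order gives the chromosomal ordering used to sort the genes in the plots below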

# ---------------------- merge with the frequency count ----------------------

PRODFV = merge(PRODFV, Vchain, by.x='Top.V.Gene', by.y='v.name', all.x=TRUE)

PRODFD = merge(PRODFD, Dchain, by.x='Top.D.Gene', by.y='v.name', all.x=TRUE)

PRODFJ = merge(PRODFJ, Jchain, by.x='Top.J.Gene', by.y='v.name', all.x=TRUE)

# ---------------------- Create the V, D and J frequency plots and write the data.frame for every plot to a file ----------------------

pV = ggplot(PRODFV)
pV = pV + geom_bar( aes( x=factor(reorder(Top.V.Gene, chr.orderV)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
pV = pV + xlab("Summary of V gene") + ylab("Frequency") + ggtitle("Relative frequency of V gene usage")
write.table(x=PRODFV, file="VFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)

png("VPlot.png",width = 1280, height = 720)
pV
dev.off();
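#note: a bare ggplot object on its own line (such as pV above) is auto-printed when the script
#is run with Rscript; inside a block such as if(useD){...} below it is not, which is why
#print() is used there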

if(useD){
  pD = ggplot(PRODFD)
  pD = pD + geom_bar( aes( x=factor(reorder(Top.D.Gene, chr.orderD)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
  pD = pD + xlab("Summary of D gene") + ylab("Frequency") + ggtitle("Relative frequency of D gene usage")
  write.table(x=PRODFD, file="DFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)

  png("DPlot.png",width = 800, height = 600)
  print(pD)
  dev.off();
}

pJ = ggplot(PRODFJ)
pJ = pJ + geom_bar( aes( x=factor(reorder(Top.J.Gene, chr.orderJ)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
pJ = pJ + xlab("Summary of J gene") + ylab("Frequency") + ggtitle("Relative frequency of J gene usage")
write.table(x=PRODFJ, file="JFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)

png("JPlot.png",width = 800, height = 600)
pJ
dev.off();

# ---------------------- Now the frequency plots of the V, D and J families ----------------------

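#the gene "family" is taken as everything before the first hyphen in the gene name,
#e.g. a (hypothetical) "IGHV3-23" is counted under family "IGHV3"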
VGenes = PRODF[,c("Sample", "Top.V.Gene")]
VGenes$Top.V.Gene = gsub("-.*", "", VGenes$Top.V.Gene)
VGenes = data.frame(data.table(VGenes)[, list(Count=.N), by=c("Sample", "Top.V.Gene")])
TotalPerSample = data.frame(data.table(VGenes)[, list(total=sum(.SD$Count)), by=Sample])
VGenes = merge(VGenes, TotalPerSample, by="Sample")
VGenes$Frequency = VGenes$Count * 100 / VGenes$total
VPlot = ggplot(VGenes)
VPlot = VPlot + geom_bar(aes( x = Top.V.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Distribution of V gene families") +
  ylab("Percentage of sequences")
png("VFPlot.png")
VPlot
dev.off();
write.table(x=VGenes, file="VFFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)

if(useD){
  DGenes = PRODF[,c("Sample", "Top.D.Gene")]
  DGenes$Top.D.Gene = gsub("-.*", "", DGenes$Top.D.Gene)
  DGenes = data.frame(data.table(DGenes)[, list(Count=.N), by=c("Sample", "Top.D.Gene")])
  TotalPerSample = data.frame(data.table(DGenes)[, list(total=sum(.SD$Count)), by=Sample])
  DGenes = merge(DGenes, TotalPerSample, by="Sample")
  DGenes$Frequency = DGenes$Count * 100 / DGenes$total
  DPlot = ggplot(DGenes)
  DPlot = DPlot + geom_bar(aes( x = Top.D.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    ggtitle("Distribution of D gene families") +
    ylab("Percentage of sequences")
  png("DFPlot.png")
  print(DPlot)
  dev.off();
  write.table(x=DGenes, file="DFFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)
}

JGenes = PRODF[,c("Sample", "Top.J.Gene")]
JGenes$Top.J.Gene = gsub("-.*", "", JGenes$Top.J.Gene)
JGenes = data.frame(data.table(JGenes)[, list(Count=.N), by=c("Sample", "Top.J.Gene")])
TotalPerSample = data.frame(data.table(JGenes)[, list(total=sum(.SD$Count)), by=Sample])
JGenes = merge(JGenes, TotalPerSample, by="Sample")
JGenes$Frequency = JGenes$Count * 100 / JGenes$total
JPlot = ggplot(JGenes)
JPlot = JPlot + geom_bar(aes( x = Top.J.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Distribution of J gene families") +
  ylab("Percentage of sequences")
png("JFPlot.png")
JPlot
dev.off();
write.table(x=JGenes, file="JFFrequency.csv", sep=",",quote=F,row.names=F,col.names=T)

# ---------------------- Plotting the cdr3 length ----------------------

CDR3Length = data.frame(data.table(PRODF)[, list(Count=.N), by=c("Sample", "CDR3.Length.DNA")])
TotalPerSample = data.frame(data.table(CDR3Length)[, list(total=sum(.SD$Count)), by=Sample])
CDR3Length = merge(CDR3Length, TotalPerSample, by="Sample")
CDR3Length$Frequency = CDR3Length$Count * 100 / CDR3Length$total
CDR3LengthPlot = ggplot(CDR3Length)
CDR3LengthPlot = CDR3LengthPlot + geom_bar(aes( x = CDR3.Length.DNA, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Length distribution of CDR3") +
  xlab("CDR3 Length") +
  ylab("Percentage of sequences")
png("CDR3LengthPlot.png",width = 1280, height = 720)
CDR3LengthPlot
dev.off()
write.table(x=CDR3Length, file="CDR3LengthPlot.csv", sep=",",quote=F,row.names=F,col.names=T)

# ---------------------- Plot the heatmaps ----------------------


#get the reverse order for the V and D genes
revVchain = Vchain
revDchain = Dchain
revVchain$chr.orderV = rev(revVchain$chr.orderV)
revDchain$chr.orderD = rev(revDchain$chr.orderD)
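#in all three heatmaps the fill value relLength is the log of the pairing count divided by the
#per-sample maximum of that log, so colours are comparable within a sample (0..1) rather than
#between samples; gene pairings that never occur stay NA and are drawn white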

if(useD){
  plotVD <- function(dat){
    if(length(dat[,1]) == 0){
      return()
    }
    img = ggplot() +
      geom_tile(data=dat, aes(x=factor(reorder(Top.D.Gene, chr.orderD)), y=factor(reorder(Top.V.Gene, chr.orderV)), fill=relLength)) +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      scale_fill_gradient(low="gold", high="blue", na.value="white") +
      ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
      xlab("D genes") +
      ylab("V Genes")

    png(paste("HeatmapVD_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Dchain$v.name)), height=100+(15*length(Vchain$v.name)))
    print(img)
    dev.off()
    write.table(x=acast(dat, Top.V.Gene~Top.D.Gene, value.var="Length"), file=paste("HeatmapVD_", unique(dat[3])[1,1], ".csv", sep=""), sep=",",quote=F,row.names=T,col.names=NA)
  }

  VandDCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.V.Gene", "Top.D.Gene", "Sample")])

  VandDCount$l = log(VandDCount$Length)
  maxVD = data.frame(data.table(VandDCount)[, list(max=max(l)), by=c("Sample")])
  VandDCount = merge(VandDCount, maxVD, by.x="Sample", by.y="Sample", all.x=T)
  VandDCount$relLength = VandDCount$l / VandDCount$max

  cartegianProductVD = expand.grid(Top.V.Gene = Vchain$v.name, Top.D.Gene = Dchain$v.name, Sample = unique(inputdata$Sample))

  completeVD = merge(VandDCount, cartegianProductVD, all.y=TRUE)
  completeVD = merge(completeVD, revVchain, by.x="Top.V.Gene", by.y="v.name", all.x=TRUE)
  completeVD = merge(completeVD, Dchain, by.x="Top.D.Gene", by.y="v.name", all.x=TRUE)
  VDList = split(completeVD, f=completeVD[,"Sample"])

  lapply(VDList, FUN=plotVD)
}

plotVJ <- function(dat){
  if(length(dat[,1]) == 0){
    return()
  }
  cat(paste(unique(dat[3])[1,1]))
  img = ggplot() +
    geom_tile(data=dat, aes(x=factor(reorder(Top.J.Gene, chr.orderJ)), y=factor(reorder(Top.V.Gene, chr.orderV)), fill=relLength)) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    scale_fill_gradient(low="gold", high="blue", na.value="white") +
    ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
    xlab("J genes") +
    ylab("V Genes")

  png(paste("HeatmapVJ_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Jchain$v.name)), height=100+(15*length(Vchain$v.name)))
  print(img)
  dev.off()
  write.table(x=acast(dat, Top.V.Gene~Top.J.Gene, value.var="Length"), file=paste("HeatmapVJ_", unique(dat[3])[1,1], ".csv", sep=""), sep=",",quote=F,row.names=T,col.names=NA)
}

VandJCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.V.Gene", "Top.J.Gene", "Sample")])

VandJCount$l = log(VandJCount$Length)
maxVJ = data.frame(data.table(VandJCount)[, list(max=max(l)), by=c("Sample")])
VandJCount = merge(VandJCount, maxVJ, by.x="Sample", by.y="Sample", all.x=T)
VandJCount$relLength = VandJCount$l / VandJCount$max

cartegianProductVJ = expand.grid(Top.V.Gene = Vchain$v.name, Top.J.Gene = Jchain$v.name, Sample = unique(inputdata$Sample))

completeVJ = merge(VandJCount, cartegianProductVJ, all.y=TRUE)
completeVJ = merge(completeVJ, revVchain, by.x="Top.V.Gene", by.y="v.name", all.x=TRUE)
completeVJ = merge(completeVJ, Jchain, by.x="Top.J.Gene", by.y="v.name", all.x=TRUE)
VJList = split(completeVJ, f=completeVJ[,"Sample"])
lapply(VJList, FUN=plotVJ)

if(useD){
  plotDJ <- function(dat){
    if(length(dat[,1]) == 0){
      return()
    }
    img = ggplot() +
      geom_tile(data=dat, aes(x=factor(reorder(Top.J.Gene, chr.orderJ)), y=factor(reorder(Top.D.Gene, chr.orderD)), fill=relLength)) +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      scale_fill_gradient(low="gold", high="blue", na.value="white") +
      ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
      xlab("J genes") +
      ylab("D Genes")

    png(paste("HeatmapDJ_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Jchain$v.name)), height=100+(15*length(Dchain$v.name)))
    print(img)
    dev.off()
    write.table(x=acast(dat, Top.D.Gene~Top.J.Gene, value.var="Length"), file=paste("HeatmapDJ_", unique(dat[3])[1,1], ".csv", sep=""), sep=",",quote=F,row.names=T,col.names=NA)
  }


  DandJCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.D.Gene", "Top.J.Gene", "Sample")])

  DandJCount$l = log(DandJCount$Length)
  maxDJ = data.frame(data.table(DandJCount)[, list(max=max(l)), by=c("Sample")])
  DandJCount = merge(DandJCount, maxDJ, by.x="Sample", by.y="Sample", all.x=T)
  DandJCount$relLength = DandJCount$l / DandJCount$max

  cartegianProductDJ = expand.grid(Top.D.Gene = Dchain$v.name, Top.J.Gene = Jchain$v.name, Sample = unique(inputdata$Sample))

  completeDJ = merge(DandJCount, cartegianProductDJ, all.y=TRUE)
  completeDJ = merge(completeDJ, revDchain, by.x="Top.D.Gene", by.y="v.name", all.x=TRUE)
  completeDJ = merge(completeDJ, Jchain, by.x="Top.J.Gene", by.y="v.name", all.x=TRUE)
  DJList = split(completeDJ, f=completeDJ[,"Sample"])
  lapply(DJList, FUN=plotDJ)
}


# ---------------------- calculating the clonality score ----------------------

if("Replicate" %in% colnames(inputdata)) #can only calculate clonality score when replicate information is available
{
  if(clonality_method == "boyd"){
    samples = split(clonalityFrame, clonalityFrame$Sample, drop=T)

    for (sample in samples){
      res = data.frame(paste=character(0))
      sample_id = unique(sample$Sample)[[1]]
      for(replicate in unique(sample$Replicate)){
        tmp = sample[sample$Replicate == replicate,]
        clone_table = data.frame(table(tmp$clonaltype))
        clone_col_name = paste("V", replicate, sep="")
        colnames(clone_table) = c("paste", clone_col_name)
        res = merge(res, clone_table, by="paste", all=T)
      }
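
      #res is now a clone-by-replicate count matrix (one row per clonal type, one column per
      #replicate); lymphclon's infer.clonality() estimates the clonality of the sample from
      #how often the same clone recurs across replicates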

      res[is.na(res)] = 0
      infer.result = infer.clonality(as.matrix(res[,2:ncol(res)]))

      write.table(data.table(infer.result[[12]]), file=paste("lymphclon_clonality_", sample_id, ".csv", sep=""), sep=",",quote=F,row.names=F,col.names=F)

      res$type = rowSums(res[,2:ncol(res)])

      coincidence.table = data.frame(table(res$type))
      colnames(coincidence.table) = c("Coincidence Type", "Raw Coincidence Freq")
      write.table(coincidence.table, file=paste("lymphclon_coincidences_", sample_id, ".csv", sep=""), sep=",",quote=F,row.names=F,col.names=T)
    }
  } else {
    write.table(clonalityFrame, "clonalityComplete.csv", sep=",",quote=F,row.names=F,col.names=T)

    clonalFreq = data.frame(data.table(clonalityFrame)[, list(Type=.N), by=c("Sample", "clonaltype")])
    clonalFreqCount = data.frame(data.table(clonalFreq)[, list(Count=.N), by=c("Sample", "Type")])
    clonalFreqCount$realCount = clonalFreqCount$Type * clonalFreqCount$Count
    clonalSum = data.frame(data.table(clonalFreqCount)[, list(Reads=sum(realCount)), by=c("Sample")])
    clonalFreqCount = merge(clonalFreqCount, clonalSum, by.x="Sample", by.y="Sample")

    ct = c('Type\tWeight\n2\t1\n3\t3\n4\t6\n5\t10\n6\t15')
    tcct = textConnection(ct)
    CT = read.table(tcct, sep="\t", header=TRUE)
    close(tcct)
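    #a clonal type seen in n replicates contributes choose(n, 2) replicate pairs
    #("coincidences"): 2 -> 1, 3 -> 3, 4 -> 6, 5 -> 10, 6 -> 15, which is what the
    #Weight column above encodes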
    clonalFreqCount = merge(clonalFreqCount, CT, by.x="Type", by.y="Type", all.x=T)
    clonalFreqCount$WeightedCount = clonalFreqCount$Count * clonalFreqCount$Weight

    ReplicateReads = data.frame(data.table(clonalityFrame)[, list(Type=.N), by=c("Sample", "Replicate", "clonaltype")])
    ReplicateReads = data.frame(data.table(ReplicateReads)[, list(Reads=.N), by=c("Sample", "Replicate")])
    clonalFreqCount$Reads = as.numeric(clonalFreqCount$Reads)
    ReplicateReads$squared = ReplicateReads$Reads * ReplicateReads$Reads

    ReplicatePrint <- function(dat){
      write.table(dat[-1], paste("ReplicateReads_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
    }

    ReplicateSplit = split(ReplicateReads, f=ReplicateReads[,"Sample"])
    lapply(ReplicateSplit, FUN=ReplicatePrint)

    ReplicateReads = data.frame(data.table(ReplicateReads)[, list(ReadsSum=sum(as.numeric(Reads)), ReadsSquaredSum=sum(as.numeric(squared))), by=c("Sample")])
    clonalFreqCount = merge(clonalFreqCount, ReplicateReads, by.x="Sample", by.y="Sample", all.x=T)

    ReplicateSumPrint <- function(dat){
      write.table(dat[-1], paste("ReplicateSumReads_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
    }

    ReplicateSumSplit = split(ReplicateReads, f=ReplicateReads[,"Sample"])
    lapply(ReplicateSumSplit, FUN=ReplicateSumPrint)

    clonalFreqCountSum = data.frame(data.table(clonalFreqCount)[, list(Numerator=sum(WeightedCount, na.rm=T)), by=c("Sample")])
    clonalFreqCount = merge(clonalFreqCount, clonalFreqCountSum, by.x="Sample", by.y="Sample", all.x=T)
    clonalFreqCount$ReadsSum = as.numeric(clonalFreqCount$ReadsSum) #prevent integer overflow
    clonalFreqCount$Denominator = (((clonalFreqCount$ReadsSum * clonalFreqCount$ReadsSum) - clonalFreqCount$ReadsSquaredSum) / 2)
    clonalFreqCount$Result = (clonalFreqCount$Numerator + 1) / (clonalFreqCount$Denominator + 1)
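    #the denominator is the number of possible cross-replicate read pairs per sample,
    #(ReadsSum^2 - ReadsSquaredSum) / 2, so the clonality score is roughly
    #(weighted observed coincidences + 1) / (possible coincidences + 1)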

    ClonalityScorePrint <- function(dat){
      write.table(dat$Result, paste("ClonalityScore_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
    }

    clonalityScore = clonalFreqCount[c("Sample", "Result")]
    clonalityScore = unique(clonalityScore)

    clonalityScoreSplit = split(clonalityScore, f=clonalityScore[,"Sample"])
    lapply(clonalityScoreSplit, FUN=ClonalityScorePrint)

    clonalityOverview = clonalFreqCount[c("Sample", "Type", "Count", "Weight", "WeightedCount")]

    ClonalityOverviewPrint <- function(dat){
      write.table(dat[-1], paste("ClonalityOverView_", unique(dat[1])[1,1] , ".csv", sep=""), sep=",",quote=F,na="-",row.names=F,col.names=F)
    }

    clonalityOverviewSplit = split(clonalityOverview, f=clonalityOverview$Sample)
    lapply(clonalityOverviewSplit, FUN=ClonalityOverviewPrint)
  }
}

imgtcolumns = c("X3V.REGION.trimmed.nt.nb","P3V.nt.nb", "N1.REGION.nt.nb", "P5D.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "P3D.nt.nb", "N2.REGION.nt.nb", "P5J.nt.nb", "X5J.REGION.trimmed.nt.nb", "X3V.REGION.trimmed.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "X5J.REGION.trimmed.nt.nb", "N1.REGION.nt.nb", "N2.REGION.nt.nb", "P3V.nt.nb", "P5D.nt.nb", "P3D.nt.nb", "P5J.nt.nb")
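#these IMGT-style junction columns hold the number of trimmed (deleted) nucleotides at the V,
#D and J ends and the P- and N-nucleotide counts; several names appear twice in the vector,
#which does not affect the all(... %in% ...) membership check below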
if(all(imgtcolumns %in% colnames(inputdata)))
{
  print("found IMGT columns, running junction analysis")
  newData = data.frame(data.table(PRODF)[,list(unique=.N,
                                               VH.DEL=mean(.SD$X3V.REGION.trimmed.nt.nb, na.rm=T),
                                               P1=mean(.SD$P3V.nt.nb, na.rm=T),
                                               N1=mean(.SD$N1.REGION.nt.nb, na.rm=T),
                                               P2=mean(.SD$P5D.nt.nb, na.rm=T),
                                               DEL.DH=mean(.SD$X5D.REGION.trimmed.nt.nb, na.rm=T),
                                               DH.DEL=mean(.SD$X3D.REGION.trimmed.nt.nb, na.rm=T),
                                               P3=mean(.SD$P3D.nt.nb, na.rm=T),
                                               N2=mean(.SD$N2.REGION.nt.nb, na.rm=T),
                                               P4=mean(.SD$P5J.nt.nb, na.rm=T),
                                               DEL.JH=mean(.SD$X5J.REGION.trimmed.nt.nb, na.rm=T),
                                               Total.Del=( mean(.SD$X3V.REGION.trimmed.nt.nb, na.rm=T) +
                                                           mean(.SD$X5D.REGION.trimmed.nt.nb, na.rm=T) +
                                                           mean(.SD$X3D.REGION.trimmed.nt.nb, na.rm=T) +
                                                           mean(.SD$X5J.REGION.trimmed.nt.nb, na.rm=T)),
                                               Total.N=( mean(.SD$N1.REGION.nt.nb, na.rm=T) +
                                                         mean(.SD$N2.REGION.nt.nb, na.rm=T)),
                                               Total.P=( mean(.SD$P3V.nt.nb, na.rm=T) +
                                                         mean(.SD$P5D.nt.nb, na.rm=T) +
                                                         mean(.SD$P3D.nt.nb, na.rm=T) +
                                                         mean(.SD$P5J.nt.nb, na.rm=T))),
                                         by=c("Sample")])
  print(newData)
  newData[,sapply(newData, is.numeric)] = round(newData[,sapply(newData, is.numeric)],1)
  write.table(newData, "junctionAnalysisProd.csv" , sep=",",quote=F,na="-",row.names=F,col.names=F)

  newData = data.frame(data.table(UNPROD)[,list(unique=.N,
                                                VH.DEL=mean(.SD$X3V.REGION.trimmed.nt.nb, na.rm=T),
                                                P1=mean(.SD$P3V.nt.nb, na.rm=T),
                                                N1=mean(.SD$N1.REGION.nt.nb, na.rm=T),
                                                P2=mean(.SD$P5D.nt.nb, na.rm=T),
                                                DEL.DH=mean(.SD$X5D.REGION.trimmed.nt.nb, na.rm=T),
                                                DH.DEL=mean(.SD$X3D.REGION.trimmed.nt.nb, na.rm=T),
                                                P3=mean(.SD$P3D.nt.nb, na.rm=T),
                                                N2=mean(.SD$N2.REGION.nt.nb, na.rm=T),
                                                P4=mean(.SD$P5J.nt.nb, na.rm=T),
                                                DEL.JH=mean(.SD$X5J.REGION.trimmed.nt.nb, na.rm=T),
                                                Total.Del=( mean(.SD$X3V.REGION.trimmed.nt.nb, na.rm=T) +
                                                            mean(.SD$X5D.REGION.trimmed.nt.nb, na.rm=T) +
                                                            mean(.SD$X3D.REGION.trimmed.nt.nb, na.rm=T) +
                                                            mean(.SD$X5J.REGION.trimmed.nt.nb, na.rm=T)),
                                                Total.N=( mean(.SD$N1.REGION.nt.nb, na.rm=T) +
                                                          mean(.SD$N2.REGION.nt.nb, na.rm=T)),
                                                Total.P=( mean(.SD$P3V.nt.nb, na.rm=T) +
                                                          mean(.SD$P5D.nt.nb, na.rm=T) +
                                                          mean(.SD$P3D.nt.nb, na.rm=T) +
                                                          mean(.SD$P5J.nt.nb, na.rm=T))),
                                          by=c("Sample")])
  newData[,sapply(newData, is.numeric)] = round(newData[,sapply(newData, is.numeric)],1)
  write.table(newData, "junctionAnalysisUnProd.csv" , sep=",",quote=F,na="-",row.names=F,col.names=F)
}

# ---------------------- AA composition in CDR3 ----------------------

AACDR3 = PRODF[,c("Sample", "CDR3.Seq")]

TotalPerSample = data.frame(data.table(AACDR3)[, list(total=sum(nchar(as.character(.SD$CDR3.Seq)))), by=Sample])

AAfreq = list()

for(i in 1:nrow(TotalPerSample)){
  sample = TotalPerSample$Sample[i]
  AAfreq[[i]] = data.frame(table(unlist(strsplit(as.character(AACDR3[AACDR3$Sample == sample,c("CDR3.Seq")]), ""))))
  AAfreq[[i]]$Sample = sample
}

AAfreq = ldply(AAfreq, data.frame)
AAfreq = merge(AAfreq, TotalPerSample, by="Sample", all.x = T)
AAfreq$freq_perc = as.numeric(AAfreq$Freq / AAfreq$total * 100)


AAorder = read.table(sep="\t", header=TRUE, text="order.aa\tAA\n1\tR\n2\tK\n3\tN\n4\tD\n5\tQ\n6\tE\n7\tH\n8\tP\n9\tY\n10\tW\n11\tS\n12\tT\n13\tG\n14\tA\n15\tM\n16\tC\n17\tF\n18\tL\n19\tV\n20\tI")
AAfreq = merge(AAfreq, AAorder, by.x='Var1', by.y='AA', all.x=TRUE)

AAfreq = AAfreq[!is.na(AAfreq$order.aa),]

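#amino acids are plotted from hydrophilic (left) to hydrophobic (right) following AAorder;
#the shaded rectangles mark the positively charged residues R, K and H (red) and the
#negatively charged residues D and E (blue)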
AAfreqplot = ggplot(AAfreq)
AAfreqplot = AAfreqplot + geom_bar(aes( x=factor(reorder(Var1, order.aa)), y = freq_perc, fill = Sample), stat='identity', position='dodge' )
AAfreqplot = AAfreqplot + annotate("rect", xmin = 0.5, xmax = 2.5, ymin = 0, ymax = Inf, fill = "red", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 3.5, xmax = 4.5, ymin = 0, ymax = Inf, fill = "blue", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 5.5, xmax = 6.5, ymin = 0, ymax = Inf, fill = "blue", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 6.5, xmax = 7.5, ymin = 0, ymax = Inf, fill = "red", alpha = 0.2)
AAfreqplot = AAfreqplot + ggtitle("Amino Acid Composition in the CDR3") + xlab("Amino Acid, from Hydrophilic (left) to Hydrophobic (right)") + ylab("Percentage")

png("AAComposition.png",width = 1280, height = 720)
AAfreqplot
dev.off()
write.table(AAfreq, "AAComposition.csv" , sep=",",quote=F,na="-",row.names=F,col.names=T)