5
|
# ---------------------- load/install packages ----------------------

# Install any dependency that is missing, then attach it. The attach order is
# kept exactly as in the original script (gridExtra, ggplot2, plyr,
# data.table, reshape2, lymphclon) so that name masking between the packages
# is unchanged.
required.packages = c("gridExtra", "ggplot2", "plyr", "data.table", "reshape2", "lymphclon")

for (pkg in required.packages) {
  if (!(pkg %in% rownames(installed.packages()))) {
    install.packages(pkg, repos="http://cran.xl-mirror.nl/")
  }
  # character.only is required because 'pkg' is a variable, not a bare name
  library(pkg, character.only=TRUE)
}
|
|
30
|
|
# ---------------------- parameters ----------------------

# All configuration comes from the command line, in fixed positional order.
args <- commandArgs(trailingOnly = TRUE)

infile = args[1] #path to input file
outfile = args[2] #path to output file
outdir = args[3] #path to output folder (html/images/data)
clonaltype = args[4] #clonaltype definition, or 'none' for no unique filtering
ct = unlist(strsplit(clonaltype, ","))
species = args[5] #human or mouse
locus = args[6] # IGH, IGK, IGL, TRB, TRA, TRG or TRD
#should unproductive sequences be filtered out? (yes/no)
# direct comparison instead of scalar ifelse(..., T, F); result is identical
# (TRUE only for "yes", NA propagates) and avoids the reassignable T/F names
filterproductive = (args[7] == "yes")
clonality_method = args[8] # "boyd" selects the lymphclon estimator below
|
|
44
|
|
45
|
|
# ---------------------- Data preparation ----------------------

print("Report Clonality - Data preperation")

# Tab-separated input; expected to carry at least a 'Sample' column, plus the
# Top.V/D/J.Gene, CDR3 and (optionally) 'Functionality'/'Replicate' columns
# used further down.
inputdata = read.table(infile, sep="\t", header=TRUE, fill=T, comment.char="", stringsAsFactors=F)

inputdata$Sample = as.character(inputdata$Sample)

print(paste("nrows: ", nrow(inputdata)))

# every output file below is written relative to the output directory
setwd(outdir)

# remove weird rows (artifact rows with an empty Sample field)
inputdata = inputdata[inputdata$Sample != "",]

print(paste("nrows: ", nrow(inputdata)))

#remove the allele from the V,D and J genes (e.g. "IGHV1-2*02" -> "IGHV1-2")
inputdata$Top.V.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.V.Gene)
inputdata$Top.D.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.D.Gene)
inputdata$Top.J.Gene = gsub("[*]([0-9]+)", "", inputdata$Top.J.Gene)

print(paste("nrows: ", nrow(inputdata)))

#filter uniques -- empty frame with inputdata's columns, placeholder for removed rows
inputdata.removed = inputdata[NULL,]

print(paste("nrows: ", nrow(inputdata)))

# default clonaltype: every row is its own clone (i.e. no unique filtering)
# NOTE(review): 1:nrow() misbehaves on a zero-row frame -- confirm the input
# is guaranteed non-empty at this point
inputdata$clonaltype = 1:nrow(inputdata)

#keep track of the count of sequences in samples or samples/replicates for the front page overview
input.sample.count = data.frame(data.table(inputdata)[, list(All=.N), by=c("Sample")])
input.rep.count = data.frame(data.table(inputdata)[, list(All=.N), by=c("Sample", "Replicate")])

# PRODF = productive sequences, UNPROD = unproductive ones; both default to
# the full dataset when productive filtering is disabled
PRODF = inputdata
UNPROD = inputdata
if(filterproductive){
  if("Functionality" %in% colnames(inputdata)) { # "Functionality" is an IMGT column
    #PRODF = inputdata[inputdata$Functionality == "productive" | inputdata$Functionality == "productive (see comment)", ]
    PRODF = inputdata[inputdata$Functionality %in% c("productive (see comment)","productive"),]

    PRODF.count = data.frame(data.table(PRODF)[, list(count=.N), by=c("Sample")])

    UNPROD = inputdata[inputdata$Functionality %in% c("unproductive (see comment)","unproductive"), ]
  } else {
    # non-IMGT input: productive = in-frame without a stop codon AND a CDR3
    # was found; UNPROD is the exact complement of that condition
    PRODF = inputdata[inputdata$VDJ.Frame != "In-frame with stop codon" & inputdata$VDJ.Frame != "Out-of-frame" & inputdata$CDR3.Found.How != "NOT_FOUND" , ]
    UNPROD = inputdata[!(inputdata$VDJ.Frame != "In-frame with stop codon" & inputdata$VDJ.Frame != "Out-of-frame" & inputdata$CDR3.Found.How != "NOT_FOUND" ), ]
  }
}
|
|
97
|
13
|
# Coerce non-numeric CDR3.Length entries in the unproductive set to 0 so the
# CDR3 length handling below can treat the column numerically.
# seq_len() instead of 1:nrow(): an empty UNPROD frame skips the loop,
# whereas 1:0 would iterate over c(1, 0) and index out of bounds.
# NOTE(review): if CDR3.Length is a character column, is.numeric() is FALSE
# for every element, so every value is replaced (by "0" after coercion) --
# confirm that is the intended behaviour.
for(i in seq_len(nrow(UNPROD))){
  if(!is.numeric(UNPROD[i,"CDR3.Length"])){
    UNPROD[i,"CDR3.Length"] = 0
  }
}
|
|
103
|
5
|
# per-sample and per-sample/replicate counts of (un)productive sequences,
# taken before unique filtering; merged into the overview table further down
prod.sample.count = data.frame(data.table(PRODF)[, list(Productive=.N), by=c("Sample")])
prod.rep.count = data.frame(data.table(PRODF)[, list(Productive=.N), by=c("Sample", "Replicate")])

unprod.sample.count = data.frame(data.table(UNPROD)[, list(Unproductive=.N), by=c("Sample")])
unprod.rep.count = data.frame(data.table(UNPROD)[, list(Unproductive=.N), by=c("Sample", "Replicate")])

# clonality is computed on the productive set, but deduplicated per
# sample+replicate (instead of per sample) below
clonalityFrame = PRODF

#remove duplicates based on the clonaltype
if(clonaltype != "none"){
  # NOTE: this overwrites the 'clonaltype' input parameter with the extended
  # column list
  clonaltype = paste(clonaltype, ",Sample", sep="") #add sample column to clonaltype, unique within samples
  # clonaltype id = the requested columns joined with ':'; first occurrence
  # of each id wins
  PRODF$clonaltype = do.call(paste, c(PRODF[unlist(strsplit(clonaltype, ","))], sep = ":"))
  PRODF = PRODF[!duplicated(PRODF$clonaltype), ]

  UNPROD$clonaltype = do.call(paste, c(UNPROD[unlist(strsplit(clonaltype, ","))], sep = ":"))
  UNPROD = UNPROD[!duplicated(UNPROD$clonaltype), ]

  #again for clonalityFrame but with sample+replicate
  clonalityFrame$clonaltype = do.call(paste, c(clonalityFrame[unlist(strsplit(clonaltype, ","))], sep = ":"))
  clonalityFrame$clonality_clonaltype = do.call(paste, c(clonalityFrame[unlist(strsplit(paste(clonaltype, ",Replicate", sep=""), ","))], sep = ":"))
  clonalityFrame = clonalityFrame[!duplicated(clonalityFrame$clonality_clonaltype), ]
}

# nothing sensible can be reported without productive sequences
if(nrow(PRODF) == 0){
  stop("No sequences left after filtering")
}
|
|
130
|
5
|
# unique (deduplicated) per-sample and per-replicate counts for the overview
prod.unique.sample.count = data.frame(data.table(PRODF)[, list(Productive_unique=.N), by=c("Sample")])
prod.unique.rep.count = data.frame(data.table(PRODF)[, list(Productive_unique=.N), by=c("Sample", "Replicate")])

unprod.unique.sample.count = data.frame(data.table(UNPROD)[, list(Unproductive_unique=.N), by=c("Sample")])
unprod.unique.rep.count = data.frame(data.table(UNPROD)[, list(Unproductive_unique=.N), by=c("Sample", "Replicate")])

# per-sequence weight used by the V/D/J frequency calculations; defaults to 1
PRODF$freq = 1

if(any(grepl(pattern="_", x=PRODF$ID))){ #the frequency can be stored in the ID with the pattern ".*_freq_.*"
  # strip the leading "<number>_" and then everything from the next "_"
  # onwards, isolating the frequency field in the middle
  PRODF$freq = gsub("^[0-9]+_", "", PRODF$ID)
  PRODF$freq = gsub("_.*", "", PRODF$freq)
  PRODF$freq = as.numeric(PRODF$freq)
  if(any(is.na(PRODF$freq))){ #if there was an "_" in the ID, but not the frequency, go back to frequency of 1 for every sequence
    PRODF$freq = 1
  }
}
|
|
148
|
8
|
#make a names list with sample -> color
# A fixed palette of visually distinct colors; used as long as it is big
# enough, otherwise fall back to evenly spaced rainbow() hues.
naive.colors = c('blue4', 'darkred', 'olivedrab3', 'red', 'gray74', 'darkviolet', 'lightblue1', 'gold', 'chartreuse2', 'pink', 'Paleturquoise3', 'Chocolate1', 'Yellow', 'Deeppink3', 'Mediumorchid1', 'Darkgreen', 'Blue', 'Gray36', 'Hotpink', 'Yellow4')
unique.samples = unique(PRODF$Sample)

if(length(unique.samples) <= length(naive.colors)){
  # seq_along() instead of 1:length(): safe (empty result) if there were
  # ever zero samples, identical otherwise
  sample.colors = naive.colors[seq_along(unique.samples)]
} else {
  sample.colors = rainbow(length(unique.samples))
}

# named vector sample -> color, consumed by every scale_fill_manual() below
names(sample.colors) = unique.samples

print("Sample.colors")
print(sample.colors)
|
5
|
163
|
|
164
|
|
#write the complete dataset that is left over, will be the input if 'none' for clonaltype and 'no' for filterproductive
write.table(PRODF, "allUnique.txt", sep="\t",quote=F,row.names=F,col.names=T)
#write.table(PRODF, "allUnique.csv", sep=",",quote=F,row.names=F,col.names=T)
write.table(UNPROD, "allUnproductive.txt", sep="\t",quote=F,row.names=F,col.names=T)

print("SAMPLE TABLE:")
print(table(PRODF$Sample))

#write the samples to a file, one sample id per line
sampleFile <- file("samples.txt")
un = unique(inputdata$Sample)
# NOTE(review): paste(un, sep="\n") is a no-op on a single vector (sep only
# separates multiple arguments); writeLines already emits one element per line
un = paste(un, sep="\n")
writeLines(un, sampleFile)
close(sampleFile)
|
|
179
|
|
# ---------------------- Counting the productive/unproductive and unique sequences ----------------------

print("Report Clonality - counting productive/unproductive/unique")

#create the table on the overview page with the productive/unique counts per sample/replicate
#first for sample
# left joins keep every sample even when it has no (un)productive sequences;
# the percentage columns are rounded to whole percents
sample.count = merge(input.sample.count, prod.sample.count, by="Sample", all.x=T)
sample.count$perc_prod = round(sample.count$Productive / sample.count$All * 100)
sample.count = merge(sample.count, prod.unique.sample.count, by="Sample", all.x=T)
sample.count$perc_prod_un = round(sample.count$Productive_unique / sample.count$All * 100)

sample.count = merge(sample.count , unprod.sample.count, by="Sample", all.x=T)
sample.count$perc_unprod = round(sample.count$Unproductive / sample.count$All * 100)
sample.count = merge(sample.count, unprod.unique.sample.count, by="Sample", all.x=T)
sample.count$perc_unprod_un = round(sample.count$Unproductive_unique / sample.count$All * 100)

#then sample/replicate
rep.count = merge(input.rep.count, prod.rep.count, by=c("Sample", "Replicate"), all.x=T)

print(rep.count)

# replicates with zero productive sequences come out of the left join as NA
fltr = is.na(rep.count$Productive)
if(any(fltr)){
  rep.count[fltr,"Productive"] = 0
}

print(rep.count)

rep.count$perc_prod = round(rep.count$Productive / rep.count$All * 100)
rep.count = merge(rep.count, prod.unique.rep.count, by=c("Sample", "Replicate"), all.x=T)
rep.count$perc_prod_un = round(rep.count$Productive_unique / rep.count$All * 100)

rep.count = merge(rep.count, unprod.rep.count, by=c("Sample", "Replicate"), all.x=T)
rep.count$perc_unprod = round(rep.count$Unproductive / rep.count$All * 100)
rep.count = merge(rep.count, unprod.unique.rep.count, by=c("Sample", "Replicate"), all.x=T)
rep.count$perc_unprod_un = round(rep.count$Unproductive_unique / rep.count$All * 100)

# fold the replicate rows into the same table as the sample rows, relabelled
# "<sample>_<replicate>"
rep.count$Sample = paste(rep.count$Sample, rep.count$Replicate, sep="_")
rep.count = rep.count[,names(rep.count) != "Replicate"]

count = rbind(sample.count, rep.count)

# NOTE(review): written comma-separated and without a header despite the
# .txt name -- the downstream report presumably expects exactly this layout;
# confirm before changing
write.table(x=count, file="productive_counting.txt", sep=",",quote=F,row.names=F,col.names=F)
|
|
226
|
|
# ---------------------- V+J+CDR3 sequence count ----------------------

# Tally every observed V-gene / J-gene / CDR3-DNA combination, drop the
# zero-count combinations that table()'s full cross-product produces, and
# report the rest sorted from most to least frequent.
vjcdr3.tally = table(clonalityFrame$Top.V.Gene, clonalityFrame$Top.J.Gene, clonalityFrame$CDR3.Seq.DNA)
VJCDR3.count = data.frame(vjcdr3.tally)
names(VJCDR3.count) = c("Top.V.Gene", "Top.J.Gene", "CDR3.Seq.DNA", "Count")

observed = VJCDR3.count$Count > 0
VJCDR3.count = VJCDR3.count[observed,]
VJCDR3.count = VJCDR3.count[order(-VJCDR3.count$Count),]

write.table(x=VJCDR3.count, file="VJCDR3_count.txt", sep="\t",quote=F,row.names=F,col.names=T)
|
|
236
|
|
# ---------------------- Frequency calculation for V, D and J ----------------------

print("Report Clonality - frequency calculation V, D and J")

# Per-sample relative usage (%) of one gene column.
# Sums the per-sequence 'freq' weights per (Sample, gene), then scales each
# gene's weight by its sample total. Returns a frame with columns:
# Sample, <gene.col>, relFreq -- identical to the three copy-pasted
# pipelines this helper replaces.
gene.rel.freq <- function(df, gene.col){
  counts = data.frame(data.table(df)[, list(Length=sum(freq)), by=c("Sample", gene.col)])
  totals = ddply(counts, .(Sample), function(x) data.frame(Total = sum(x$Length)))
  counts = merge(counts, totals, by.x='Sample', by.y='Sample', all.x=TRUE)
  ddply(counts, c("Sample", gene.col), summarise, relFreq= (Length*100 / Total))
}

PRODFV = gene.rel.freq(PRODF, "Top.V.Gene")
PRODFD = gene.rel.freq(PRODF, "Top.D.Gene")
PRODFJ = gene.rel.freq(PRODF, "Top.J.Gene")
|
|
255
|
|
# ---------------------- Setting up the gene names for the different species/loci ----------------------

print("Report Clonality - getting genes for species/loci")

# Vchain/Dchain/Jchain become data.frames with one row per gene: the gene
# name (v.name) plus its chromosomal plotting order (chr.orderV/D/J)
Vchain = ""
Dchain = ""
Jchain = ""

if(species == "custom"){
  # custom gene lists: 'locus' holds "Vgenes;Dgenes;Jgenes", each part a
  # comma-separated list of gene names in plotting order
  print("Custom genes: ")
  splt = unlist(strsplit(locus, ";"))
  print(paste("V:", splt[1]))
  print(paste("D:", splt[2]))
  print(paste("J:", splt[3]))

  Vchain = unlist(strsplit(splt[1], ","))
  Vchain = data.frame(v.name = Vchain, chr.orderV = 1:length(Vchain))

  Dchain = unlist(strsplit(splt[2], ","))
  if(length(Dchain) > 0){
    Dchain = data.frame(v.name = Dchain, chr.orderD = 1:length(Dchain))
  } else {
    # locus without D genes: empty table, which switches useD off below
    Dchain = data.frame(v.name = character(0), chr.orderD = numeric(0))
  }

  Jchain = unlist(strsplit(splt[3], ","))
  Jchain = data.frame(v.name = Jchain, chr.orderJ = 1:length(Jchain))

} else {
  # built-in gene lists; NOTE(review): read relative to outdir because of
  # the setwd() above -- confirm genes.txt is staged there
  genes = read.table("genes.txt", sep="\t", header=TRUE, fill=T, comment.char="")

  # species is matched as a pattern (grepl), locus/region exactly
  Vchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "V",c("IMGT.GENE.DB", "chr.order")]
  colnames(Vchain) = c("v.name", "chr.orderV")
  Dchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "D",c("IMGT.GENE.DB", "chr.order")]
  colnames(Dchain) = c("v.name", "chr.orderD")
  Jchain = genes[grepl(species, genes$Species) & genes$locus == locus & genes$region == "J",c("IMGT.GENE.DB", "chr.order")]
  colnames(Jchain) = c("v.name", "chr.orderJ")
}
# all D-dependent plots and heatmaps are skipped when the locus has no D genes
useD = TRUE
if(nrow(Dchain) == 0){
  useD = FALSE
  cat("No D Genes in this species/locus")
}
print(paste(nrow(Vchain), "genes in V"))
print(paste(nrow(Dchain), "genes in D"))
print(paste(nrow(Jchain), "genes in J"))
|
|
302
|
|
# ---------------------- merge with the frequency count ----------------------

# attach the chromosomal plotting order to each frequency table; genes that
# are not in the reference list get an NA order
PRODFV = merge(PRODFV, Vchain, by.x='Top.V.Gene', by.y='v.name', all.x=TRUE)

PRODFD = merge(PRODFD, Dchain, by.x='Top.D.Gene', by.y='v.name', all.x=TRUE)

PRODFJ = merge(PRODFJ, Jchain, by.x='Top.J.Gene', by.y='v.name', all.x=TRUE)
|
|
310
|
|
# ---------------------- Create the V, D and J frequency plots and write the data.frame for every plot to a file ----------------------

print("Report Clonality - V, D and J frequency plots")

# grouped bars per sample, V genes in chromosomal order on the x-axis
pV = ggplot(PRODFV)
pV = pV + geom_bar( aes( x=factor(reorder(Top.V.Gene, chr.orderV)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
pV = pV + xlab("Summary of V gene") + ylab("Frequency") + ggtitle("Relative frequency of V gene usage") + scale_fill_manual(values=sample.colors)
# this second theme() call overrides the x-axis label angle set above (45 vs 90)
pV = pV + theme(panel.background = element_rect(fill = "white", colour="black"),text = element_text(size=15, colour="black"), axis.text.x = element_text(angle = 45, hjust = 1), panel.grid.major.y = element_line(colour = "black"), panel.grid.major.x = element_blank())
write.table(x=PRODFV, file="VFrequency.txt", sep="\t",quote=F,row.names=F,col.names=T)

png("VPlot.png",width = 1280, height = 720)
# NOTE(review): relies on top-level auto-printing to render into the png
# device; use print(pV) if this script is ever source()'d
pV
dev.off()

ggsave("VPlot.pdf", pV, width=13, height=7)
|
5
|
326
|
|
if(useD){
  # same layout as the V gene frequency plot, for the D genes
  pD = ggplot(PRODFD)
  pD = pD + geom_bar( aes( x=factor(reorder(Top.D.Gene, chr.orderD)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
  pD = pD + xlab("Summary of D gene") + ylab("Frequency") + ggtitle("Relative frequency of D gene usage") + scale_fill_manual(values=sample.colors)
  pD = pD + theme(panel.background = element_rect(fill = "white", colour="black"),text = element_text(size=15, colour="black"), axis.text.x = element_text(angle = 45, hjust = 1), panel.grid.major.y = element_line(colour = "black"), panel.grid.major.x = element_blank())
  write.table(x=PRODFD, file="DFrequency.txt", sep="\t",quote=F,row.names=F,col.names=T)

  png("DPlot.png",width = 800, height = 600)
  # explicit print(): no auto-printing inside the if-block
  print(pD)
  dev.off()

  ggsave("DPlot.pdf", pD, width=10, height=7)
}
|
|
340
|
|
# J gene frequency plot, same layout as the V plot above
pJ = ggplot(PRODFJ)
pJ = pJ + geom_bar( aes( x=factor(reorder(Top.J.Gene, chr.orderJ)), y=relFreq, fill=Sample), stat='identity', position="dodge") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
pJ = pJ + xlab("Summary of J gene") + ylab("Frequency") + ggtitle("Relative frequency of J gene usage") + scale_fill_manual(values=sample.colors)
pJ = pJ + theme(panel.background = element_rect(fill = "white", colour="black"),text = element_text(size=15, colour="black"), axis.text.x = element_text(angle = 45, hjust = 1), panel.grid.major.y = element_line(colour = "black"), panel.grid.major.x = element_blank())
write.table(x=PRODFJ, file="JFrequency.txt", sep="\t",quote=F,row.names=F,col.names=T)

png("JPlot.png",width = 800, height = 600)
pJ
dev.off()

# NOTE(review): no width/height given, unlike the V and D pdfs -- ggsave
# falls back to the current device size; confirm this is intended
ggsave("JPlot.pdf", pJ)
|
5
|
352
|
|
# ---------------------- Now the frequency plots of the V, D and J families ----------------------

print("Report Clonality - V, D and J family plots")

# family = gene name with the trailing "-..." part stripped
# (e.g. "IGHV1-2" -> "IGHV1"); percentages are per sample
VGenes = PRODF[,c("Sample", "Top.V.Gene")]
VGenes$Top.V.Gene = gsub("-.*", "", VGenes$Top.V.Gene)
VGenes = data.frame(data.table(VGenes)[, list(Count=.N), by=c("Sample", "Top.V.Gene")])
TotalPerSample = data.frame(data.table(VGenes)[, list(total=sum(.SD$Count)), by=Sample])
VGenes = merge(VGenes, TotalPerSample, by="Sample")
VGenes$Frequency = VGenes$Count * 100 / VGenes$total
VPlot = ggplot(VGenes)
VPlot = VPlot + geom_bar(aes( x = Top.V.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Distribution of V gene families") +
  ylab("Percentage of sequences") +
  scale_fill_manual(values=sample.colors) +
  theme(panel.background = element_rect(fill = "white", colour="black"),text = element_text(size=15, colour="black"), axis.text.x = element_text(angle = 45, hjust = 1), panel.grid.major.y = element_line(colour = "black"), panel.grid.major.x = element_blank())
png("VFPlot.png")
# top-level auto-printing renders the plot into the png device
VPlot
dev.off()
ggsave("VFPlot.pdf", VPlot)

write.table(x=VGenes, file="VFFrequency.txt", sep="\t",quote=F,row.names=F,col.names=T)
|
5
|
375
|
|
if(useD){
  # same family summary as for the V genes, applied to the D genes
  DGenes = PRODF[,c("Sample", "Top.D.Gene")]
  DGenes$Top.D.Gene = gsub("-.*", "", DGenes$Top.D.Gene)
  DGenes = data.frame(data.table(DGenes)[, list(Count=.N), by=c("Sample", "Top.D.Gene")])
  TotalPerSample = data.frame(data.table(DGenes)[, list(total=sum(.SD$Count)), by=Sample])
  DGenes = merge(DGenes, TotalPerSample, by="Sample")
  DGenes$Frequency = DGenes$Count * 100 / DGenes$total
  DPlot = ggplot(DGenes)
  DPlot = DPlot + geom_bar(aes( x = Top.D.Gene, y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    ggtitle("Distribution of D gene families") +
    ylab("Percentage of sequences") +
    scale_fill_manual(values=sample.colors) +
    theme(panel.background = element_rect(fill = "white", colour="black"),text = element_text(size=15, colour="black"), axis.text.x = element_text(angle = 45, hjust = 1), panel.grid.major.y = element_line(colour = "black"), panel.grid.major.x = element_blank())
  png("DFPlot.png")
  # explicit print(): no auto-printing inside the if-block
  print(DPlot)
  dev.off()

  ggsave("DFPlot.pdf", DPlot)
  write.table(x=DGenes, file="DFFrequency.txt", sep="\t",quote=F,row.names=F,col.names=T)
}
|
|
396
|
|
# ---------------------- Plotting the cdr3 length ----------------------

print("Report Clonality - CDR3 length plot")

# per-sample percentage of sequences at each CDR3 length
CDR3Length = data.frame(data.table(PRODF)[, list(Count=.N), by=c("Sample", "CDR3.Length")])
TotalPerSample = data.frame(data.table(CDR3Length)[, list(total=sum(.SD$Count)), by=Sample])
CDR3Length = merge(CDR3Length, TotalPerSample, by="Sample")
CDR3Length$Frequency = CDR3Length$Count * 100 / CDR3Length$total
CDR3LengthPlot = ggplot(CDR3Length)
# reorder by the numeric value so the x-axis sorts numerically, not lexically
CDR3LengthPlot = CDR3LengthPlot + geom_bar(aes( x = factor(reorder(CDR3.Length, as.numeric(CDR3.Length))), y = Frequency, fill = Sample), stat='identity', position='dodge' ) + theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  ggtitle("Length distribution of CDR3") +
  xlab("CDR3 Length") +
  ylab("Percentage of sequences") +
  scale_fill_manual(values=sample.colors) +
  theme(panel.background = element_rect(fill = "white", colour="black"),text = element_text(size=15, colour="black"), axis.text.x = element_text(angle = 45, hjust = 1), panel.grid.major.y = element_line(colour = "black"), panel.grid.major.x = element_blank())
png("CDR3LengthPlot.png",width = 1280, height = 720)
# top-level auto-printing renders the plot into the png device
CDR3LengthPlot
dev.off()

ggsave("CDR3LengthPlot.pdf", CDR3LengthPlot, width=12, height=7)

write.table(x=CDR3Length, file="CDR3LengthPlot.txt", sep="\t",quote=F,row.names=F,col.names=T)
|
5
|
419
|
|
# ---------------------- Plot the heatmaps ----------------------

# Reversed copies of the V and D gene-order tables; the heatmap sections
# below merge against these so the gene order runs the other way along the
# y-axis, while the gene names themselves stay untouched.
revVchain = Vchain
revVchain$chr.orderV = rev(Vchain$chr.orderV)

revDchain = Dchain
revDchain$chr.orderD = rev(Dchain$chr.orderD)
|
|
427
|
|
if(useD){
  print("Report Clonality - Heatmaps VD")

  # Draw the V x D usage heatmap for one sample and write the matching count
  # matrix to disk. 'dat' is one sample's split of completeVD (columns
  # include Top.D.Gene, Top.V.Gene, Sample, Length, relLength and the
  # chr.order* columns).
  plotVD <- function(dat){
    if(length(dat[,1]) == 0){ # empty split, nothing to draw
      return()
    }

    img = ggplot() +
      geom_tile(data=dat, aes(x=factor(reorder(Top.D.Gene, chr.orderD)), y=factor(reorder(Top.V.Gene, chr.orderV)), fill=relLength)) +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      scale_fill_gradient(low="gold", high="blue", na.value="white") +
      ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
      xlab("D genes") +
      ylab("V Genes") +
      theme(panel.background = element_rect(fill = "white", colour="black"),text = element_text(size=15, colour="black"), panel.grid.major = element_line(colour = "gainsboro"))

    # unique(dat[3])[1,1] is the sample name (third column of completeVD);
    # the device size scales with the number of genes on each axis
    png(paste("HeatmapVD_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Dchain$v.name)), height=100+(15*length(Vchain$v.name)))
    print(img)
    dev.off()

    ggsave(paste("HeatmapVD_", unique(dat[3])[1,1] , ".pdf", sep=""), img, height=13, width=8)

    write.table(x=acast(dat, Top.V.Gene~Top.D.Gene, value.var="Length"), file=paste("HeatmapVD_", unique(dat[3])[1,1], ".txt", sep=""), sep="\t",quote=F,row.names=T,col.names=NA)
  }

  # log-scaled V/D co-occurrence counts, normalised per sample to [0, 1]
  VandDCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.V.Gene", "Top.D.Gene", "Sample")])

  VandDCount$l = log(VandDCount$Length)
  maxVD = data.frame(data.table(VandDCount)[, list(max=max(l)), by=c("Sample")])
  VandDCount = merge(VandDCount, maxVD, by.x="Sample", by.y="Sample", all.x=T)
  VandDCount$relLength = VandDCount$l / VandDCount$max

  # 0/0 -> NaN when every combination in a sample was seen once (log(1) == 0)
  check = is.nan(VandDCount$relLength)
  if(any(check)){
    VandDCount[check,"relLength"] = 0
  }

  # expand to the full V x D grid so unobserved combinations show up as
  # (white) NA tiles, and attach the plotting order for both axes
  cartesianProductVD = expand.grid(Top.V.Gene = Vchain$v.name, Top.D.Gene = Dchain$v.name)

  completeVD = merge(VandDCount, cartesianProductVD, by.x=c("Top.V.Gene", "Top.D.Gene"), by.y=c("Top.V.Gene", "Top.D.Gene"), all=TRUE)

  completeVD = merge(completeVD, revVchain, by.x="Top.V.Gene", by.y="v.name", all.x=TRUE)

  completeVD = merge(completeVD, Dchain, by.x="Top.D.Gene", by.y="v.name", all.x=TRUE)

  # was if(all(fltr)) in the original, which only ever fired when every
  # single row was NaN; any() matches the equivalent guards in the VJ and
  # DJ sections below
  fltr = is.nan(completeVD$relLength)
  if(any(fltr)){
    completeVD[fltr,"relLength"] = 0
  }

  VDList = split(completeVD, f=completeVD[,"Sample"])
  lapply(VDList, FUN=plotVD)
}
|
|
480
|
|
print("Report Clonality - Heatmaps VJ")

# Draw the V x J usage heatmap for one sample of completeVJ and write the
# matching count matrix to disk; mirrors plotVD above.
plotVJ <- function(dat){
  if(length(dat[,1]) == 0){ # empty split, nothing to draw
    return()
  }
  # progress marker: dat[3] is the Sample column of completeVJ
  cat(paste(unique(dat[3])[1,1]))
  img = ggplot() +
    geom_tile(data=dat, aes(x=factor(reorder(Top.J.Gene, chr.orderJ)), y=factor(reorder(Top.V.Gene, chr.orderV)), fill=relLength)) +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    scale_fill_gradient(low="gold", high="blue", na.value="white") +
    ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
    xlab("J genes") +
    ylab("V Genes") +
    theme(panel.background = element_rect(fill = "white", colour="black"),text = element_text(size=15, colour="black"), panel.grid.major = element_line(colour = "gainsboro"))

  # device size scales with the number of genes on each axis
  png(paste("HeatmapVJ_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Jchain$v.name)), height=100+(15*length(Vchain$v.name)))
  print(img)
  dev.off()

  ggsave(paste("HeatmapVJ_", unique(dat[3])[1,1] , ".pdf", sep=""), img, height=11, width=4)

  write.table(x=acast(dat, Top.V.Gene~Top.J.Gene, value.var="Length"), file=paste("HeatmapVJ_", unique(dat[3])[1,1], ".txt", sep=""), sep="\t",quote=F,row.names=T,col.names=NA)
}
|
|
505
|
|
# per-sample V x J co-occurrence counts, log-scaled and normalised to [0, 1]
# exactly like the VD section above
VandJCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.V.Gene", "Top.J.Gene", "Sample")])

VandJCount$l = log(VandJCount$Length)
maxVJ = data.frame(data.table(VandJCount)[, list(max=max(l)), by=c("Sample")])
VandJCount = merge(VandJCount, maxVJ, by.x="Sample", by.y="Sample", all.x=T)
VandJCount$relLength = VandJCount$l / VandJCount$max

# 0/0 -> NaN when every combination in a sample was seen once (log(1) == 0)
check = is.nan(VandJCount$relLength)
if(any(check)){
  VandJCount[check,"relLength"] = 0
}

# expand to the full V x J grid so unobserved combinations become NA tiles
cartegianProductVJ = expand.grid(Top.V.Gene = Vchain$v.name, Top.J.Gene = Jchain$v.name)

# NOTE(review): all.y here vs all=TRUE in the VD section -- observed
# combinations whose genes are absent from the reference lists are dropped;
# confirm which behaviour is intended
completeVJ = merge(VandJCount, cartegianProductVJ, all.y=TRUE)
completeVJ = merge(completeVJ, revVchain, by.x="Top.V.Gene", by.y="v.name", all.x=TRUE)
completeVJ = merge(completeVJ, Jchain, by.x="Top.J.Gene", by.y="v.name", all.x=TRUE)

# NOTE(review): remaining NaN set to 1 where the VD section uses 0 --
# confirm the asymmetry is deliberate
fltr = is.nan(completeVJ$relLength)
if(any(fltr)){
  completeVJ[fltr,"relLength"] = 1
}

VJList = split(completeVJ, f=completeVJ[,"Sample"])
lapply(VJList, FUN=plotVJ)
|
|
531
|
|
532
|
|
533
|
|
if(useD){
  print("Report Clonality - Heatmaps DJ")
  # Draw the D x J usage heatmap for one sample of completeDJ and write the
  # matching count matrix to disk; mirrors plotVD/plotVJ above.
  plotDJ <- function(dat){
    if(length(dat[,1]) == 0){ # empty split, nothing to draw
      return()
    }
    img = ggplot() +
      geom_tile(data=dat, aes(x=factor(reorder(Top.J.Gene, chr.orderJ)), y=factor(reorder(Top.D.Gene, chr.orderD)), fill=relLength)) +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      scale_fill_gradient(low="gold", high="blue", na.value="white") +
      ggtitle(paste(unique(dat$Sample), " (N=" , sum(dat$Length, na.rm=T) ,")", sep="")) +
      xlab("J genes") +
      ylab("D Genes") +
      theme(panel.background = element_rect(fill = "white", colour="black"),text = element_text(size=15, colour="black"), panel.grid.major = element_line(colour = "gainsboro"))

    # dat[3] is the Sample column; device size scales with gene counts
    png(paste("HeatmapDJ_", unique(dat[3])[1,1] , ".png", sep=""), width=150+(15*length(Jchain$v.name)), height=100+(15*length(Dchain$v.name)))
    print(img)
    dev.off()

    ggsave(paste("HeatmapDJ_", unique(dat[3])[1,1] , ".pdf", sep=""), img, width=4, height=7)

    write.table(x=acast(dat, Top.D.Gene~Top.J.Gene, value.var="Length"), file=paste("HeatmapDJ_", unique(dat[3])[1,1], ".txt", sep=""), sep="\t",quote=F,row.names=T,col.names=NA)
  }


  # per-sample D x J co-occurrence counts, log-scaled and normalised per sample
  DandJCount = data.frame(data.table(PRODF)[, list(Length=.N), by=c("Top.D.Gene", "Top.J.Gene", "Sample")])

  DandJCount$l = log(DandJCount$Length)
  maxDJ = data.frame(data.table(DandJCount)[, list(max=max(l)), by=c("Sample")])
  DandJCount = merge(DandJCount, maxDJ, by.x="Sample", by.y="Sample", all.x=T)
  DandJCount$relLength = DandJCount$l / DandJCount$max

  # 0/0 -> NaN when every combination in a sample was seen once (log(1) == 0)
  check = is.nan(DandJCount$relLength)
  if(any(check)){
    DandJCount[check,"relLength"] = 0
  }

  # expand to the full D x J grid so unobserved combinations become NA tiles
  cartegianProductDJ = expand.grid(Top.D.Gene = Dchain$v.name, Top.J.Gene = Jchain$v.name)

  completeDJ = merge(DandJCount, cartegianProductDJ, all.y=TRUE)
  completeDJ = merge(completeDJ, revDchain, by.x="Top.D.Gene", by.y="v.name", all.x=TRUE)
  completeDJ = merge(completeDJ, Jchain, by.x="Top.J.Gene", by.y="v.name", all.x=TRUE)

  # NOTE(review): remaining NaN set to 1 where the VD section uses 0 -- confirm
  fltr = is.nan(completeDJ$relLength)
  if(any(fltr)){
    completeDJ[fltr, "relLength"] = 1
  }

  DJList = split(completeDJ, f=completeDJ[,"Sample"])
  lapply(DJList, FUN=plotDJ)
}
|
|
585
|
|
586
|
|
# ---------------------- output tables for the circos plots ----------------------

print("Report Clonality - Circos data")

# For every sample, write the pairwise V-D, V-J and D-J usage tables that the
# downstream circos scripts consume. Empty gene calls are relabelled "NA"
# first so they form their own named category instead of an unnamed
# row/column. The per-column loop replaces three copy-pasted stanzas with
# identical behaviour (including only assigning when a match exists).
for(smpl in unique(PRODF$Sample)){
  PRODF.sample = PRODF[PRODF$Sample == smpl,]

  for(gene.column in c("Top.V.Gene", "Top.D.Gene", "Top.J.Gene")){
    fltr = PRODF.sample[,gene.column] == ""
    if(any(fltr, na.rm=T)){
      PRODF.sample[fltr, gene.column] = "NA"
    }
  }

  v.d = table(PRODF.sample$Top.V.Gene, PRODF.sample$Top.D.Gene)
  v.j = table(PRODF.sample$Top.V.Gene, PRODF.sample$Top.J.Gene)
  d.j = table(PRODF.sample$Top.D.Gene, PRODF.sample$Top.J.Gene)

  write.table(v.d, file=paste(smpl, "_VD_circos.txt", sep=""), sep="\t", quote=F, row.names=T, col.names=NA)
  write.table(v.j, file=paste(smpl, "_VJ_circos.txt", sep=""), sep="\t", quote=F, row.names=T, col.names=NA)
  write.table(d.j, file=paste(smpl, "_DJ_circos.txt", sep=""), sep="\t", quote=F, row.names=T, col.names=NA)
}
|
|
617
|
|
618 # ---------------------- calculating the clonality score ----------------------
|
|
619
|
|
620 if("Replicate" %in% colnames(inputdata)) #can only calculate clonality score when replicate information is available
|
|
621 {
|
|
print("Report Clonality - Clonality")
# full replicate-level deduplicated productive set used for clonality
write.table(clonalityFrame, "clonalityComplete.txt", sep="\t",quote=F,row.names=F,col.names=T)
|
5
|
if(clonality_method == "boyd"){
  # Replicate-overlap clonality: per sample, build a clone x replicate count
  # matrix and feed it to lymphclon's infer.clonality.
  samples = split(clonalityFrame, clonalityFrame$Sample, drop=T)

  for (sample in samples){
    # res grows one "V<replicate>" count column per replicate, keyed by the
    # clone id in column 'paste'
    res = data.frame(paste=character(0))
    sample_id = unique(sample$Sample)[[1]]
    for(replicate in unique(sample$Replicate)){
      tmp = sample[sample$Replicate == replicate,]
      clone_table = data.frame(table(tmp$clonaltype))
      clone_col_name = paste("V", replicate, sep="")
      colnames(clone_table) = c("paste", clone_col_name)
      # outer merge so clones absent from a replicate get NA for its column
      res = merge(res, clone_table, by="paste", all=T)
    }

    # clones missing from a replicate were simply never observed there
    res[is.na(res)] = 0

    write.table(res, file=paste("raw_clonality_", sample_id, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=F)
    write.table(as.matrix(res[,2:ncol(res)]), file=paste("raw_clonality2_", sample_id, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=F)

    # NOTE(review): res is written out and immediately read back in; this
    # round-trip coerces the merged factor/character columns to plain types
    # -- confirm before simplifying it away
    res = read.table(paste("raw_clonality_", sample_id, ".txt", sep=""), header=F, sep="\t", quote="", stringsAsFactors=F, fill=T, comment.char="")

    infer.result = infer.clonality(as.matrix(res[,2:ncol(res)]))

    #print(infer.result)

    # element 12 of the lymphclon result list is the value reported as the
    # clonality score -- TODO confirm against the lymphclon documentation
    write.table(data.table(infer.result[[12]]), file=paste("lymphclon_clonality_", sample_id, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=F)

    # total observations of each clone summed across the replicate columns
    res$type = rowSums(res[,2:ncol(res)])

    coincidence.table = data.frame(table(res$type))
    colnames(coincidence.table) = c("Coincidence Type", "Raw Coincidence Freq")
    write.table(coincidence.table, file=paste("lymphclon_coincidences_", sample_id, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=T)
  }
}
|
|
658 clonalFreq = data.frame(data.table(clonalityFrame)[, list(Type=.N), by=c("Sample", "clonaltype")])
|
|
659
|
|
660 #write files for every coincidence group of >1
|
|
661 samples = unique(clonalFreq$Sample)
|
|
662 for(sample in samples){
|
|
663 clonalFreqSample = clonalFreq[clonalFreq$Sample == sample,]
|
|
664 if(max(clonalFreqSample$Type) > 1){
|
|
665 for(i in 2:max(clonalFreqSample$Type)){
|
|
666 clonalFreqSampleType = clonalFreqSample[clonalFreqSample$Type == i,]
|
|
667 clonalityFrame.sub = clonalityFrame[clonalityFrame$clonaltype %in% clonalFreqSampleType$clonaltype,]
|
|
668 clonalityFrame.sub = clonalityFrame.sub[order(clonalityFrame.sub$clonaltype),]
|
|
669 write.table(clonalityFrame.sub, file=paste("coincidences_", sample, "_", i, ".txt", sep=""), sep="\t",quote=F,row.names=F,col.names=T)
|
|
670 }
|
|
671 }
|
|
672 }
|
|
673
|
|
674 clonalFreqCount = data.frame(data.table(clonalFreq)[, list(Count=.N), by=c("Sample", "Type")])
|
|
675 clonalFreqCount$realCount = clonalFreqCount$Type * clonalFreqCount$Count
|
|
676 clonalSum = data.frame(data.table(clonalFreqCount)[, list(Reads=sum(realCount)), by=c("Sample")])
|
|
677 clonalFreqCount = merge(clonalFreqCount, clonalSum, by.x="Sample", by.y="Sample")
|
|
678
|
|
679 ct = c('Type\tWeight\n2\t1\n3\t3\n4\t6\n5\t10\n6\t15')
|
|
680 tcct = textConnection(ct)
|
|
681 CT = read.table(tcct, sep="\t", header=TRUE)
|
|
682 close(tcct)
|
|
683 clonalFreqCount = merge(clonalFreqCount, CT, by.x="Type", by.y="Type", all.x=T)
|
|
684 clonalFreqCount$WeightedCount = clonalFreqCount$Count * clonalFreqCount$Weight
|
|
685
|
|
686 ReplicateReads = data.frame(data.table(clonalityFrame)[, list(Type=.N), by=c("Sample", "Replicate", "clonaltype")])
|
|
687 ReplicateReads = data.frame(data.table(ReplicateReads)[, list(Reads=.N), by=c("Sample", "Replicate")])
|
|
688 clonalFreqCount$Reads = as.numeric(clonalFreqCount$Reads)
|
|
689 ReplicateReads$Reads = as.numeric(ReplicateReads$Reads)
|
|
690 ReplicateReads$squared = as.numeric(ReplicateReads$Reads * ReplicateReads$Reads)
|
|
691
|
|
692 ReplicatePrint <- function(dat){
|
|
693 write.table(dat[-1], paste("ReplicateReads_", unique(dat[1])[1,1] , ".txt", sep=""), sep="\t",quote=F,na="-",row.names=F,col.names=F)
|
5
|
694 }
|
26
|
695
|
|
696 ReplicateSplit = split(ReplicateReads, f=ReplicateReads[,"Sample"])
|
|
697 lapply(ReplicateSplit, FUN=ReplicatePrint)
|
|
698
|
|
699 ReplicateReads = data.frame(data.table(ReplicateReads)[, list(ReadsSum=sum(as.numeric(Reads)), ReadsSquaredSum=sum(as.numeric(squared))), by=c("Sample")])
|
|
700 clonalFreqCount = merge(clonalFreqCount, ReplicateReads, by.x="Sample", by.y="Sample", all.x=T)
|
|
701
|
|
702 ReplicateSumPrint <- function(dat){
|
|
703 write.table(dat[-1], paste("ReplicateSumReads_", unique(dat[1])[1,1] , ".txt", sep=""), sep="\t",quote=F,na="-",row.names=F,col.names=F)
|
|
704 }
|
|
705
|
|
706 ReplicateSumSplit = split(ReplicateReads, f=ReplicateReads[,"Sample"])
|
|
707 lapply(ReplicateSumSplit, FUN=ReplicateSumPrint)
|
|
708
|
|
709 clonalFreqCountSum = data.frame(data.table(clonalFreqCount)[, list(Numerator=sum(WeightedCount, na.rm=T)), by=c("Sample")])
|
|
710 clonalFreqCount = merge(clonalFreqCount, clonalFreqCountSum, by.x="Sample", by.y="Sample", all.x=T)
|
|
711 clonalFreqCount$ReadsSum = as.numeric(clonalFreqCount$ReadsSum) #prevent integer overflow
|
|
712 clonalFreqCount$Denominator = (((clonalFreqCount$ReadsSum * clonalFreqCount$ReadsSum) - clonalFreqCount$ReadsSquaredSum) / 2)
|
|
713 clonalFreqCount$Result = (clonalFreqCount$Numerator + 1) / (clonalFreqCount$Denominator + 1)
|
|
714
|
|
715 ClonalityScorePrint <- function(dat){
|
|
716 write.table(dat$Result, paste("ClonalityScore_", unique(dat[1])[1,1] , ".txt", sep=""), sep="\t",quote=F,na="-",row.names=F,col.names=F)
|
|
717 }
|
|
718
|
|
719 clonalityScore = clonalFreqCount[c("Sample", "Result")]
|
|
720 clonalityScore = unique(clonalityScore)
|
|
721
|
|
722 clonalityScoreSplit = split(clonalityScore, f=clonalityScore[,"Sample"])
|
|
723 lapply(clonalityScoreSplit, FUN=ClonalityScorePrint)
|
|
724
|
|
725 clonalityOverview = clonalFreqCount[c("Sample", "Type", "Count", "Weight", "WeightedCount")]
|
|
726
|
|
727
|
|
728
|
|
729 ClonalityOverviewPrint <- function(dat){
|
|
730 dat = dat[order(dat[,2]),]
|
|
731 write.table(dat[-1], paste("ClonalityOverView_", unique(dat[1])[1,1] , ".txt", sep=""), sep="\t",quote=F,na="-",row.names=F,col.names=F)
|
|
732 }
|
|
733
|
|
734 clonalityOverviewSplit = split(clonalityOverview, f=clonalityOverview$Sample)
|
|
735 lapply(clonalityOverviewSplit, FUN=ClonalityOverviewPrint)
|
|
736
|
5
|
737 }
|
|
738
|
|
# Keep untouched copies of the data sets: the junction analysis below adds
# columns to PRODF/UNPROD and they are restored from these afterwards.
bak <- PRODF
bakun <- UNPROD
|
5
|
741
|
|
# Full set of IMGT junction columns required for the junction analysis below.
# NOTE(review): this vector contains duplicates (e.g. X3V.REGION.trimmed.nt.nb,
# N1.REGION.nt.nb appear twice); harmless for the `%in%` membership test it
# feeds, but worth deduplicating if it is ever used for column selection.
imgtcolumns = c("X3V.REGION.trimmed.nt.nb","P3V.nt.nb", "N1.REGION.nt.nb", "P5D.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "P3D.nt.nb", "N2.REGION.nt.nb", "P5J.nt.nb", "X5J.REGION.trimmed.nt.nb", "X3V.REGION.trimmed.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "X5J.REGION.trimmed.nt.nb", "N1.REGION.nt.nb", "N2.REGION.nt.nb", "P3V.nt.nb", "P5D.nt.nb", "P3D.nt.nb", "P5J.nt.nb")
|
|
# ---------------------- junction analysis (trimming, N- and P-nucleotides) ----------------------
# Runs only when the full set of IMGT junction columns is present in the input.
if(all(imgtcolumns %in% colnames(inputdata)))
{
  print("found IMGT columns, running junction analysis")

  #ensure certain columns are in the data (files generated with older versions of IMGT Loader)
  col.checks = c("N.REGION.nt.nb", "N1.REGION.nt.nb", "N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb")
  for(col.check in col.checks){
    if(!(col.check %in% names(PRODF))){
      print(paste(col.check, "not found adding new column"))
      # Assigning a scalar to a column of a zero-row data.frame fails, so add a
      # zero-length column in that case.
      # BUGFIX: the old zero-row branch cbind-ed hard-coded N3/N4 columns
      # regardless of which column (col.check) was missing — once per missing
      # column — creating duplicate and wrongly named columns. Now the
      # actually-missing column is added.
      if(nrow(PRODF) > 0){
        PRODF[,col.check] = 0
      } else {
        PRODF[[col.check]] = numeric(0)
      }
      if(nrow(UNPROD) > 0){
        UNPROD[,col.check] = 0
      } else {
        UNPROD[[col.check]] = numeric(0)
      }
    }
  }

  # Split by presence of a D-gene assignment (real D gene names are > 2 chars).
  # Note the two subsets deliberately overlap for 3-character values, as before.
  PRODF.with.D = PRODF[nchar(PRODF$Top.D.Gene, keepNA=F) > 2,]
  PRODF.no.D = PRODF[nchar(PRODF$Top.D.Gene, keepNA=F) < 4,]
  write.table(PRODF.no.D, "productive_no_D.txt" , sep="\t",quote=F,na="-",row.names=F,col.names=T)

  UNPROD.with.D = UNPROD[nchar(UNPROD$Top.D.Gene, keepNA=F) > 2,]
  UNPROD.no.D = UNPROD[nchar(UNPROD$Top.D.Gene, keepNA=F) < 4,]
  write.table(UNPROD.no.D, "unproductive_no_D.txt" , sep="\t",quote=F,na="-",row.names=F,col.names=T)

  # median() coerced to a plain double so data.table gets a consistent column type
  num_median = function(x, na.rm=T) { as.numeric(median(x, na.rm=na.rm)) }

  # Per-sample junction statistics for sequences WITH a D gene.
  # 'agg' is the aggregation function (mean or num_median); for composite
  # columns na.rm=T belongs to rowSums(), matching the original expressions.
  junction.stats.wD <- function(dat, agg){
    data.frame(data.table(dat)[,list(unique=.N,
      VH.DEL=agg(.SD$X3V.REGION.trimmed.nt.nb, na.rm=T),
      P1=agg(.SD$P3V.nt.nb, na.rm=T),
      N1=agg(rowSums(.SD[,c("N.REGION.nt.nb", "N1.REGION.nt.nb"), with=F], na.rm=T)),
      P2=agg(.SD$P5D.nt.nb, na.rm=T),
      DEL.DH=agg(.SD$X5D.REGION.trimmed.nt.nb, na.rm=T),
      DH.DEL=agg(.SD$X3D.REGION.trimmed.nt.nb, na.rm=T),
      P3=agg(.SD$P3D.nt.nb, na.rm=T),
      N2=agg(rowSums(.SD[,c("N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb"), with=F], na.rm=T)),
      P4=agg(.SD$P5J.nt.nb, na.rm=T),
      DEL.JH=agg(.SD$X5J.REGION.trimmed.nt.nb, na.rm=T),
      Total.Del=agg(rowSums(.SD[,c("X3V.REGION.trimmed.nt.nb", "X5D.REGION.trimmed.nt.nb", "X3D.REGION.trimmed.nt.nb", "X5J.REGION.trimmed.nt.nb"), with=F], na.rm=T)),
      Total.N=agg(rowSums(.SD[,c("N.REGION.nt.nb", "N1.REGION.nt.nb", "N2.REGION.nt.nb", "N3.REGION.nt.nb", "N4.REGION.nt.nb"), with=F], na.rm=T)),
      Total.P=agg(rowSums(.SD[,c("P3V.nt.nb", "P5D.nt.nb", "P3D.nt.nb", "P5J.nt.nb"), with=F], na.rm=T)),
      Median.CDR3.l=as.double(median(as.numeric(.SD$CDR3.Length), na.rm=T))),
      by=c("Sample")])
  }

  # Per-sample junction statistics for sequences WITHOUT a D gene
  # (single N region, no D-related columns).
  junction.stats.nD <- function(dat, agg){
    data.frame(data.table(dat)[,list(unique=.N,
      VH.DEL=agg(.SD$X3V.REGION.trimmed.nt.nb, na.rm=T),
      P1=agg(.SD$P3V.nt.nb, na.rm=T),
      N1=agg(.SD$N.REGION.nt.nb, na.rm=T),
      P2=agg(.SD$P5J.nt.nb, na.rm=T),
      DEL.JH=agg(.SD$X5J.REGION.trimmed.nt.nb, na.rm=T),
      Total.Del=agg(rowSums(.SD[,c("X3V.REGION.trimmed.nt.nb", "X5J.REGION.trimmed.nt.nb"), with=F], na.rm=T)),
      Total.N=agg(.SD$N.REGION.nt.nb, na.rm=T),
      Total.P=agg(rowSums(.SD[,c("P3V.nt.nb", "P5J.nt.nb"), with=F], na.rm=T)),
      Median.CDR3.l=as.double(median(as.numeric(.SD$CDR3.Length), na.rm=T))),
      by=c("Sample")])
  }

  # Round all numeric columns to 1 decimal and write without header/row names.
  write.junction.stats <- function(stats, outfile){
    stats[,sapply(stats, is.numeric)] = round(stats[,sapply(stats, is.numeric)],1)
    write.table(stats, outfile, sep="\t",quote=F,na="-",row.names=F,col.names=F)
  }

  write.junction.stats(junction.stats.wD(PRODF.with.D, mean), "junctionAnalysisProd_mean_wD.txt")
  write.junction.stats(junction.stats.wD(PRODF.with.D, num_median), "junctionAnalysisProd_median_wD.txt")
  write.junction.stats(junction.stats.wD(UNPROD.with.D, mean), "junctionAnalysisUnProd_mean_wD.txt")
  write.junction.stats(junction.stats.wD(UNPROD.with.D, num_median), "junctionAnalysisUnProd_median_wD.txt")

  #---------------- again for no-D
  write.junction.stats(junction.stats.nD(PRODF.no.D, mean), "junctionAnalysisProd_mean_nD.txt")
  write.junction.stats(junction.stats.nD(PRODF.no.D, num_median), "junctionAnalysisProd_median_nD.txt")
  write.junction.stats(junction.stats.nD(UNPROD.no.D, mean), "junctionAnalysisUnProd_mean_nD.txt")
  write.junction.stats(junction.stats.nD(UNPROD.no.D, num_median), "junctionAnalysisUnProd_median_nD.txt")
}
|
|
910
|
|
# Restore the data sets saved before the junction analysis.
PRODF <- bak
UNPROD <- bakun
|
5
|
913
|
|
914
|
|
# ---------------------- D reading frame ----------------------

D.REGION.reading.frame = PRODF[,c("Sample", "D.REGION.reading.frame")]

# Sequences without a D assignment have no reading frame; label them explicitly.
chck = is.na(D.REGION.reading.frame$D.REGION.reading.frame)
if(any(chck)){
D.REGION.reading.frame[chck,"D.REGION.reading.frame"] = "No D"
}

# Per-sample count of each reading frame category (1/2/3/"No D").
D.REGION.reading.frame.1 = data.frame(data.table(D.REGION.reading.frame)[, list(Freq=.N), by=c("Sample", "D.REGION.reading.frame")])

# Per-sample sequence total used as the percentage denominator.
# BUGFIX: previously this summed as.numeric(D.REGION.reading.frame) — i.e. the
# frame NUMBERS themselves (1+2+3+..., with "No D" coerced to NA) — instead of
# counting sequences, so "percentage" was not a percentage of sequences.
D.REGION.reading.frame.2 = data.frame(data.table(D.REGION.reading.frame.1)[, list(sample.sum=sum(as.numeric(.SD$Freq), na.rm=T)), by=c("Sample")])

D.REGION.reading.frame = merge(D.REGION.reading.frame.1, D.REGION.reading.frame.2, by="Sample")

D.REGION.reading.frame$percentage = round(D.REGION.reading.frame$Freq / D.REGION.reading.frame$sample.sum * 100, 1)

write.table(D.REGION.reading.frame, "DReadingFrame.txt" , sep="\t",quote=F,row.names=F,col.names=T)

# Grouped bar chart of reading-frame usage per sample.
D.REGION.reading.frame = ggplot(D.REGION.reading.frame)
D.REGION.reading.frame = D.REGION.reading.frame + geom_bar(aes( x = D.REGION.reading.frame, y = percentage, fill=Sample), stat='identity', position='dodge' ) + ggtitle("D reading frame") + xlab("Frame") + ylab("Frequency")
D.REGION.reading.frame = D.REGION.reading.frame + scale_fill_manual(values=sample.colors)
D.REGION.reading.frame = D.REGION.reading.frame + theme(panel.background = element_rect(fill = "white", colour="black"),text = element_text(size=15, colour="black"), axis.text.x = element_text(angle = 45, hjust = 1), panel.grid.major.y = element_line(colour = "black"), panel.grid.major.x = element_blank())

png("DReadingFrame.png")
# print() instead of relying on top-level auto-printing, so the plot is also
# rendered when this script is source()d.
print(D.REGION.reading.frame)
dev.off()

ggsave("DReadingFrame.pdf", D.REGION.reading.frame)
|
5
|
944
|
|
# ---------------------- AA composition in CDR3 ----------------------

AACDR3 = PRODF[,c("Sample", "CDR3.Seq")]

# Total number of CDR3 amino acids per sample (denominator for the percentages).
TotalPerSample = data.frame(data.table(AACDR3)[, list(total=sum(nchar(as.character(.SD$CDR3.Seq)))), by=Sample])

AAfreq = list()

# Tally every character over all CDR3 sequences of each sample.
# BUGFIX: seq_len() instead of 1:nrow() so an empty sample table yields an
# empty loop rather than iterating over c(1, 0).
for(i in seq_len(nrow(TotalPerSample))){
sample = TotalPerSample$Sample[i]
AAfreq[[i]] = data.frame(table(unlist(strsplit(as.character(AACDR3[AACDR3$Sample == sample,c("CDR3.Seq")]), ""))))
AAfreq[[i]]$Sample = sample
}

AAfreq = ldply(AAfreq, data.frame)
AAfreq = merge(AAfreq, TotalPerSample, by="Sample", all.x = T)
AAfreq$freq_perc = as.numeric(AAfreq$Freq / AAfreq$total * 100)


# Fixed display order: hydrophilic residues on the left, hydrophobic on the right.
AAorder = read.table(sep="\t", header=TRUE, text="order.aa\tAA\n1\tR\n2\tK\n3\tN\n4\tD\n5\tQ\n6\tE\n7\tH\n8\tP\n9\tY\n10\tW\n11\tS\n12\tT\n13\tG\n14\tA\n15\tM\n16\tC\n17\tF\n18\tL\n19\tV\n20\tI")
AAfreq = merge(AAfreq, AAorder, by.x='Var1', by.y='AA', all.x=TRUE)

# Drop characters that are not one of the 20 amino acids (e.g. '*', 'X').
AAfreq = AAfreq[!is.na(AAfreq$order.aa),]

AAfreqplot = ggplot(AAfreq)
AAfreqplot = AAfreqplot + geom_bar(aes( x=factor(reorder(Var1, order.aa)), y = freq_perc, fill = Sample), stat='identity', position='dodge' )
# Shaded bands over the x-axis: red = R, K, H; blue = D, E.
AAfreqplot = AAfreqplot + annotate("rect", xmin = 0.5, xmax = 2.5, ymin = 0, ymax = Inf, fill = "red", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 3.5, xmax = 4.5, ymin = 0, ymax = Inf, fill = "blue", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 5.5, xmax = 6.5, ymin = 0, ymax = Inf, fill = "blue", alpha = 0.2)
AAfreqplot = AAfreqplot + annotate("rect", xmin = 6.5, xmax = 7.5, ymin = 0, ymax = Inf, fill = "red", alpha = 0.2)
AAfreqplot = AAfreqplot + ggtitle("Amino Acid Composition in the CDR3") + xlab("Amino Acid, from Hydrophilic (left) to Hydrophobic (right)") + ylab("Percentage") + scale_fill_manual(values=sample.colors)
AAfreqplot = AAfreqplot + theme(panel.background = element_rect(fill = "white", colour="black"),text = element_text(size=15, colour="black"), panel.grid.major.y = element_line(colour = "black"), panel.grid.major.x = element_blank())

png("AAComposition.png",width = 1280, height = 720)
# print() instead of relying on top-level auto-printing, so the plot is also
# rendered when this script is source()d.
print(AAfreqplot)
dev.off()

ggsave("AAComposition.pdf", AAfreqplot, width=12, height=7)

write.table(AAfreq, "AAComposition.txt" , sep="\t",quote=F,na="-",row.names=F,col.names=T)
|
5
|
985
|
8
|
# ---------------------- AA median CDR3 length ----------------------

# Median CDR3 length per sample.
# Cleanup: the original passed na.rm=T to as.numeric() (which silently ignores
# it); na.rm belongs to median() only, where it was already set.
median.aa.l = data.frame(data.table(PRODF)[, list(median=as.double(median(as.numeric(.SD$CDR3.Length), na.rm=T))), by=c("Sample")])
write.table(median.aa.l, "AAMedianBySample.txt" , sep="\t",quote=F,na="-",row.names=F,col.names=F)
|
8
|
990
|
25
|
991
|
|
#generate the "Sequences that are present in more than one replicate" dataset
clonaltype.in.replicates = inputdata
# Keep only productive sequences with a complete clonal-type definition.
clonaltype.in.replicates = clonaltype.in.replicates[clonaltype.in.replicates$Functionality %in% c("productive (see comment)","productive"),]
clonaltype.in.replicates = clonaltype.in.replicates[!(is.na(clonaltype.in.replicates$ID) | is.na(clonaltype.in.replicates$Top.V.Gene) | is.na(clonaltype.in.replicates$Top.J.Gene)),]
clonaltype = unlist(strsplit(clonaltype, ","))

# First deduplicate within replicates: clonal type + replicate id must be unique.
clonaltype.in.replicates$clonaltype = do.call(paste, c(clonaltype.in.replicates[c(clonaltype, "Replicate")], sep = ":"))

clonaltype.in.replicates = clonaltype.in.replicates[!duplicated(clonaltype.in.replicates$clonaltype),]

# Then rebuild the clonal type WITHOUT the Sample column so identical clones can
# be matched across replicates.
# BUGFIX: was clonaltype[-which(clonaltype == "Sample")], which yields an EMPTY
# vector when "Sample" is absent (x[-integer(0)] selects nothing), collapsing
# every clonal type to the same empty key; setdiff() is correct in both cases.
clonaltype = setdiff(clonaltype, "Sample")

clonaltype.in.replicates$clonaltype = do.call(paste, c(clonaltype.in.replicates[clonaltype], sep = ":"))
clonaltype.in.replicates = clonaltype.in.replicates[,c("clonaltype","Replicate", "ID", "Sequence", "Sample")]


write.table(clonaltype.in.replicates, "clonaltypes_replicates_before_table.txt" , sep="\t",quote=F,na="-",row.names=F,col.names=T)

# Occurrences of each clonal type across replicates.
clonaltype.counts = data.frame(table(clonaltype.in.replicates$clonaltype))

write.table(clonaltype.counts, "clonaltypes_counts.txt" , sep="\t",quote=F,na="-",row.names=F,col.names=T)

names(clonaltype.counts) = c("clonaltype", "coincidence")

# Keep only clonal types seen more than once (i.e. in more than one replicate).
clonaltype.counts = clonaltype.counts[clonaltype.counts$coincidence > 1,]

clonaltype.in.replicates = clonaltype.in.replicates[clonaltype.in.replicates$clonaltype %in% clonaltype.counts$clonaltype,]
clonaltype.in.replicates = merge(clonaltype.in.replicates, clonaltype.counts, by="clonaltype")
clonaltype.in.replicates = clonaltype.in.replicates[order(-clonaltype.in.replicates$coincidence, clonaltype.in.replicates$clonaltype, clonaltype.in.replicates$Replicate),c("coincidence","clonaltype", "Sample", "Replicate", "ID", "Sequence")]


write.table(clonaltype.in.replicates, "clonaltypes_replicates.txt" , sep="\t",quote=F,na="-",row.names=F,col.names=T)
|
|
1024
|
|
1025
|
|
1026
|
|
1027
|
|
1028
|
|
1029
|
|
1030
|
|
1031
|
|
1032
|
|
1033
|
|
1034
|
|
1035
|
|
1036
|
|
1037
|
|
1038
|
|
1039
|
|
1040
|
|
1041
|
|
1042
|
|
1043
|
|
1044
|
|
1045
|
|
1046
|