<tool id="lda_analy1" name="Perform LDA" version="1.0.1">
	<description>Linear Discriminant Analysis</description>
    <requirements>
      <requirement type="package" version="2.11.0">R</requirement>
    </requirements>
	<command interpreter="sh">r_wrapper.sh $script_file</command>
	<inputs>
		<param format="tabular" name="input" type="data" label="Source file"/>
		<param name="cond" type="integer" value="3" label="Number of principal components" help="See TIP below">
			<validator type="empty_field" message="Enter a valid number of principal components; see the TIP in the help section below"/>
		</param>

	</inputs>
	<outputs>
		<data format="txt" name="output" />
	</outputs>

	<tests>
		<test>
			<param name="input" value="matrix_generator_for_pc_and_lda_output.tabular"/>
			<param name="cond" value="2"/>
			<output name="output" file="lda_analy_output.txt"/>
		</test>
	</tests>

	<configfiles>
        	<configfile name="script_file">

        rm(list = objects() )

        ############# FORMAT X DATA #########################
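        # Drop every column whose entry in the last row is NA (i.e. columns that carry
        # no class label). Note: this local function masks base R's format(), which is
        # harmless here because only this version is ever called on the data.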
        format&lt;-function(data) {
            ind=NULL
            for(i in 1 : ncol(data)){
                if (is.na(data[nrow(data),i])) {
                    ind&lt;-c(ind,i)
                }
            }
            #print(is.null(ind))
            if (!is.null(ind)) {
                data&lt;-data[,-c(ind)]
            }

            data
        }

        ########GET RESPONSES ###############################
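        # Read the class labels from the last column of the (transposed) matrix and
        # recode them numerically: "Y " (escape) becomes 0, "X " (subject) becomes 1.
        # The trailing space in the labels is intentional and must match the data.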
        get_resp&lt;- function(data) {
            resp1&lt;-as.vector(data[,ncol(data)])
                resp=numeric(length(resp1))
            for (i in 1:length(resp1)) {
                if (resp1[i]=="Y ") {
                    resp[i] = 0
                }
                if (resp1[i]=="X ") {
                    resp[i] = 1
                }
            }
                return(resp)
        }

        ######## CHARS TO NUMBERS ###########################
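        # Coerce every entry of F to an integer and return the result as a numeric
        # matrix G with the same dimensions as F.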
        f_to_numbers&lt;- function(F) { 
            ind&lt;-NULL
            G&lt;-matrix(0,nrow(F), ncol(F))
            for (i in 1:nrow(F)) {
                for (j in 1:ncol(F)) {
                    G[i,j]&lt;-as.integer(F[i,j])
                }
            }
            return(G)
        }

        ###################NORMALIZING#########################
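        # Center each column of M and scale it to unit standard deviation; columns
        # with zero standard deviation are left unchanged. The a and b arguments are
        # accepted but not used.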
        norm &lt;- function(M, a=NULL, b=NULL) {
            C&lt;-NULL
            ind&lt;-NULL

            for (i in 1: ncol(M)) {
                if (sd(M[,i])!=0) {
                    M[,i]&lt;-(M[,i]-mean(M[,i]))/sd(M[,i])
                }
                #   else {print(mean(M[,i]))}   
            }
            return(M)
        }

        ##### LDA DIRECTIONS #################################
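        # Compute LDA directions for k groups: form the between-class matrix B from
        # the prior-weighted group means, whiten it with the inverse square root of
        # the covariance matrix W of the predictors (obtained through its SVD), and
        # decompose the whitened matrix. Returns list(directions, eigenvalues).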
        lda_dec &lt;- function(data, k){
            priors=numeric(k)
            grandmean&lt;-numeric(ncol(data)-1)
            means=matrix(0,k,ncol(data)-1)
            B = matrix(0, ncol(data)-1, ncol(data)-1)
            N=nrow(data)
            for (i in 1:k){
                priors[i]=sum(data[,1]==i)/N
                grp=subset(data,data\$group==i)
                means[i,]=colMeans(grp[,2:ncol(data)])
                #print(means[i,])
                #print(priors[i])
                #print(priors[i]*means[i,])
                grandmean = priors[i]*means[i,] + grandmean           
            }

            for (i in 1:k) {
                B= B + priors[i]*((means[i,]-grandmean)%*%t(means[i,]-grandmean))
            }
    
            W = var(data[,2:ncol(data)])
            svdW = svd(W)
            inv_sqrtW =solve(svdW\$v %*% diag(sqrt(svdW\$d)) %*% t(svdW\$v))
            B_star= t(inv_sqrtW)%*%B%*%inv_sqrtW
            B_star_decomp = svd(B_star)
            directions  = inv_sqrtW%*%B_star_decomp\$v
            return( list(directions, B_star_decomp\$d) )                          
        }

        ################ NAIVE BAYES FOR 1D SIR OR LDA ##############
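        # Classify one-dimensional projections: compute the mean projection of each
        # training class, place the cutoff at tau*means[1] + (1-tau)*means[2], and
        # assign every test point to the class on its side of the cutoff.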
        naive_bayes_classifier &lt;- function(resp, tr_data, test_data, k=2, tau) {
            tr_data=data.frame(resp=resp, dir=tr_data)
            means=numeric(k)
            #print(k)
            cl=numeric(k)
            predclass=numeric(length(test_data))
            for (i in 1:k) {
                grp = subset(tr_data, resp==i)
                means[i] = mean(grp\$dir)
            #print(i, means[i])  
            }
            cutoff = tau*means[1]+(1-tau)*means[2] 
            #print(tau)
            #print(means)
            #print(cutoff)
            if (cutoff&gt;means[1]) {
               cl[1]=1 
               cl[2]=2
            }
            else {
               cl[1]=2 
               cl[2]=1
            }

            for (i in 1:length(test_data)) {
                if (test_data[i] &lt;= cutoff) {
                    predclass[i] = cl[1]
                }
                else {
                    predclass[i] = cl[2]
                }
            }
            #print(means)
            #print(mean(means))
            #X11()
            #plot(test_data,pch=predclass, col=resp) 
            predclass
        }

        ################# EXTENDED ERROR RATES #################
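        # Return the overall, within-escape (class 1) and within-subject (class 2)
        # classification success rates, in percent.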
        ext_error_rate &lt;- function(predclass, actualclass,msg=c("you forgot the message"), pr=1) {
                 er=sum(predclass != actualclass)/length(predclass)

                 matr&lt;-data.frame(predclass=predclass,actualclass=actualclass)
                 escapes = subset(matr, actualclass==1)
                 subjects = subset(matr, actualclass==2)      
                 er_esc=sum(escapes\$predclass != escapes\$actualclass)/length(escapes\$predclass) 
                 er_subj=sum(subjects\$predclass != subjects\$actualclass)/length(subjects\$predclass)   

                 if (pr==1) {
                     #print(paste(c(msg, 'overall : ', (1-er)*100, "%."),collapse=" "))
                     #print(paste(c(msg, 'within escapes : ', (1-er_esc)*100, "%."),collapse=" "))
                     #print(paste(c(msg, 'within subjects: ', (1-er_subj)*100, "%."),collapse=" "))
                 }
            return(c((1-er)*100, (1-er_esc)*100, (1-er_subj)*100))                                                                                    
        }

        ## Main Function ##
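        # Overall flow: read the input matrix, separate the oligo labels and the class
        # labels, convert the remaining frequencies to a numeric matrix, normalize and
        # take the SVD to obtain principal components, project onto the first d of
        # them, and compute the LDA direction. Classification accuracy is estimated by
        # leave-one-out cross-validation over a grid of tau values, and the resulting
        # 3 x length(tau) success-rate matrix is written to the output with dput().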

	files&lt;-matrix("${input}", 1,1, byrow=T)

	d&lt;-as.integer("${cond}")	# number of principal components to use

	tau&lt;-seq(0,1, by=0.005)
	#tau&lt;-seq(0,1, by=0.1)
	for_curve=matrix(-10, 3,length(tau))

	##############################################################

	test_data_whole_X &lt;-read.delim(files[1,1], row.names=1)

	#### FORMAT TRAINING DATA ####################################
	# get only necessary columns 

	test_data_whole_X&lt;-format(test_data_whole_X)
	oligo_labels&lt;-test_data_whole_X[1:(nrow(test_data_whole_X)-1),ncol(test_data_whole_X)]
	test_data_whole_X&lt;-test_data_whole_X[,1:(ncol(test_data_whole_X)-1)]

	X_names&lt;-colnames(test_data_whole_X)[1:ncol(test_data_whole_X)]
	test_data_whole_X&lt;-t(test_data_whole_X)
	resp&lt;-get_resp(test_data_whole_X) 
	ldaqda_resp = resp + 1
	a&lt;-sum(resp)		# number of subjects (label "X ")
	b&lt;-length(resp) - a	# number of escapes (label "Y ")
	## FREQUENCIES #################################################
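	# Convert the frequency columns to a numeric matrix, column-normalize it, and
	# take its SVD; the right singular vectors give the principal-component
	# directions used for the projections below.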
	F&lt;-test_data_whole_X[,1:(ncol(test_data_whole_X)-1)]
	F&lt;-f_to_numbers(F)
	FN&lt;-norm(F, a, b)
	ss&lt;-svd(FN)
	eigvar&lt;-NULL
	eig&lt;-ss\$d^2

	for ( i in 1:length(ss\$d)) {
		eigvar[i]&lt;-sum(eig[1:i])/sum(eig)
	}

	#print(paste(c("Variance explained : ", eigvar[d]*100, "%"), collapse=""))
	
	Z&lt;-F%*%ss\$v

	ldaqda_data &lt;- data.frame(group=ldaqda_resp,Z[,1:d])
	lda_dir&lt;-lda_dec(ldaqda_data,2)
	train_lda_pred &lt;-Z[,1:d]%*%lda_dir[[1]]

	############# NAIVE BAYES CROSS-VALIDATION #############
	### LDA #####
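	# Leave-one-out cross-validation: for each sample i, redo the normalization,
	# SVD and LDA without that sample, project the held-out sample onto the first
	# LDA direction, and classify it once for every value of tau.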

	y&lt;-ldaqda_resp
	X&lt;-F
	cv&lt;-matrix(NA, nrow(test_data_whole_X), length(tau))
	for (i in 1:nrow(test_data_whole_X)) {
	#	print(i)
		resp&lt;-y[-i]
		p&lt;-matrix(X[-i,], dim(X)[1]-1, dim(X)[2])
		testdata&lt;-matrix(X[i,],1,dim(X)[2])
		p1&lt;-norm(p)
		sss&lt;-svd(p1)
		pred&lt;-(p%*%sss\$v)[,1:d]
		test&lt;- (testdata%*%sss\$v)[,1:d]
		lda  &lt;- lda_dec(data.frame(group=resp,pred),2)
		pred &lt;- pred[,1:d]%*%lda[[1]][,1]
		test &lt;- test%*%lda[[1]][,1]
		test&lt;-matrix(test, 1, length(test))
		for (t in 1:length(tau)) {
			cv[i, t] &lt;- naive_bayes_classifier (resp, pred, test,k=2, tau[t]) 
		}
 	}

	for (t in 1:length(tau)) {
		tr_err&lt;-ext_error_rate(cv[,t], ldaqda_resp , c("CV"), 1)
		for_curve[1:3,t]&lt;-tr_err
	}

	dput(for_curve, file="${output}")


		</configfile>
	</configfiles>

	<help>

.. class:: infomark

**TIP:** If you want to perform Principal Component Analysis (PCA) on the given numeric input data (which corresponds to the "Source file" input of the "Generate A Matrix" tool), please use *Multivariate Analysis/Principal Component Analysis*.

-----

.. class:: infomark

**What it does**

This tool performs Linear Discriminant Analysis (LDA) as described in Carrel et al., 2006 (PMID: 17009873).

*Carrel L, Park C, Tyekucheva S, Dunn J, Chiaromonte F, et al. (2006) Genomic Environment Predicts Expression Patterns on the Human Inactive X Chromosome. PLoS Genet 2(9): e151. doi:10.1371/journal.pgen.0020151*

-----

.. class:: warningmark

**Note**

- The output of the "Generate A Matrix" tool is used as the input file for this tool.
- The output of this tool contains LDA classification success rates for different values of the tuning parameter tau (from 0 to 1 in steps of 0.005). This file can be used to build the ROC plot, from which more detailed information can be obtained; see the sketch below.
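
For example, the success-rate matrix written by the tool can be read back into R with ``dget()`` and plotted against tau. This is a minimal sketch, not part of the tool itself; "lda_output.txt" stands for whatever name your downloaded output file has::

    rates = dget("lda_output.txt")      # 3 x 201 matrix of success rates (percent)
    tau   = seq(0, 1, by = 0.005)       # the tau grid used by the tool
    plot(tau, rates[1, ], type = "l", ylim = c(0, 100),
         xlab = "tau", ylab = "success rate (%)")
    lines(tau, rates[2, ], lty = 2)     # within escapes
    lines(tau, rates[3, ], lty = 3)     # within subjects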


</help>

</tool>