devteam / logistic_regression_vif
comparison: logistic_regression_vif.py @ 0:bd196d7c1ca9 (draft, default, tip)
commit message: Imported from capsule None
| field | value |
|---|---|
| author | devteam |
| date | Tue, 01 Apr 2014 10:51:26 -0400 |
| parents | |
| children | |
| from revision | to revision |
|---|---|
| -1:000000000000 (null) | 0:bd196d7c1ca9 |
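The file is new in this changeset (it is compared against the null revision), so the comparison below is simply the full contents of logistic_regression_vif.py: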
```python
#!/usr/bin/env python

import sys
from rpy import *
import numpy

def stop_err(msg):
    sys.stderr.write(msg)
    sys.exit()

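# Command-line interface: input tabular file, 1-based response column,
# comma-separated list of 1-based predictor columns, output file path.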
infile = sys.argv[1]
y_col = int(sys.argv[2])-1
x_cols = sys.argv[3].split(',')
outfile = sys.argv[4]

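# Echo the chosen columns, then look for the first non-comment, non-blank
# line (giving up after ~30 lines) to verify the dataset has tab-separated data.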
print "Predictor columns: %s; Response column: %d" % ( x_cols, y_col+1 )
fout = open(outfile,'w')
elems = []
for i, line in enumerate( file( infile ) ):
    line = line.rstrip('\r\n')
    if len( line )>0 and not line.startswith( '#' ):
        elems = line.split( '\t' )
        break
    if i == 30:
        break    # Hopefully we'll never get here...

if len( elems )<1:
    stop_err( "The data in your input dataset is either missing or not formatted properly." )

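# Read the file again, collecting the response value and one list per
# predictor column for every data row; non-numeric cells become R's NA.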
y_vals = []
x_vals = []

for k, col in enumerate(x_cols):
    x_cols[k] = int(col)-1
    x_vals.append([])

NA = 'NA'
for ind, line in enumerate( file( infile )):
    if line and not line.startswith( '#' ):
        try:
            fields = line.split("\t")
            try:
                yval = float(fields[y_col])
            except:
                yval = r('NA')
            y_vals.append(yval)
            for k, col in enumerate(x_cols):
                try:
                    xval = float(fields[col])
                except:
                    xval = r('NA')
                x_vals[k].append(xval)
        except:
            pass

x_vals1 = numpy.asarray(x_vals).transpose()

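# The response must contain both classes (at least one 0 and one 1) and
# no values other than 0, 1, or NA.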
check1 = 0
check0 = 0
for i in y_vals:
    if i == 1:
        check1 = 1
    if i == 0:
        check0 = 1
if check1 == 0 or check0 == 0:
    sys.exit("Warning: logistic regression must have at least two classes")

for i in y_vals:
    if i not in [1, 0, r('NA')]:
        print >> fout, str(i)
        sys.exit("Warning: the current version of this tool can run only with two classes, which need to be labeled as 0 and 1.")

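# Fit the logistic regression (binomial GLM) in R through rpy; when more
# than one predictor is given, also compute variance inflation factors
# (VIF) with the car package.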
dat = r.list(x=array(x_vals1), y=y_vals)
novif = 0
set_default_mode(NO_CONVERSION)
try:
    linear_model = r.glm(r("y ~ x"), data=r.na_exclude(dat), family="binomial")
except RException, rex:
    stop_err("Error performing logistic regression on the input data.\nEither the response column or one of the predictor columns contains only non-numeric or invalid values.")
if len(x_cols)>1:
    try:
        r('suppressPackageStartupMessages(library(car))')
        r.assign('dat', dat)
        r.assign('ncols', len(x_cols))
        vif = r.vif(r('glm(dat$y ~ ., data = na.exclude(data.frame(as.matrix(dat$x,ncol=ncols))->datx), family="binomial")'))
    except RException, rex:
        print rex
else:
    novif = 1

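# Switch rpy back to basic conversion and pull the coefficients, deviances,
# and coefficient table out of the fitted model for reporting.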
set_default_mode(BASIC_CONVERSION)

coeffs = linear_model.as_py()['coefficients']
null_deviance = linear_model.as_py()['null.deviance']
residual_deviance = linear_model.as_py()['deviance']
yintercept = coeffs['(Intercept)']
summary = r.summary(linear_model)
co = summary.get('coefficients', 'NA')
"""
if len(co) != len(x_vals)+1:
    stop_err("Stopped performing logistic regression on the input data, since one of the predictor columns contains only non-numeric or invalid values.")
"""

pvaly = 'NA'    # default if the p-value cannot be extracted
try:
    yintercept = r.round(float(yintercept), digits=10)
    pvaly = r.round(float(co[0][3]), digits=10)
except:
    pass
print >> fout, "response column\tc%d" % (y_col+1)
tempP = []
for i in x_cols:
    tempP.append('c'+str(i+1))
tempP = ','.join(tempP)
print >> fout, "predictor column(s)\t%s" % (tempP)
print >> fout, "Y-intercept\t%s" % (yintercept)
print >> fout, "p-value (Y-intercept)\t%s" % (pvaly)

if len(x_vals) == 1:    # single-predictor logistic regression
    try:
        slope = r.round(float(coeffs['x']), digits=10)
    except:
        slope = 'NA'
    try:
        pval = r.round(float(co[1][3]), digits=10)
    except:
        pval = 'NA'
    print >> fout, "Slope (c%d)\t%s" % ( x_cols[0]+1, slope )
    print >> fout, "p-value (c%d)\t%s" % ( x_cols[0]+1, pval )
else:    # more than one predictor
    ind = 1
    while ind < len(coeffs.keys()):
        try:
            slope = r.round(float(coeffs['x'+str(ind)]), digits=10)
        except:
            slope = 'NA'
        print >> fout, "Slope (c%d)\t%s" % ( x_cols[ind-1]+1, slope )
        try:
            pval = r.round(float(co[ind][3]), digits=10)
        except:
            pval = 'NA'
        print >> fout, "p-value (c%d)\t%s" % ( x_cols[ind-1]+1, pval )
        ind += 1

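# The reported pseudo R-squared is the fraction of the null deviance
# explained by the model: (null deviance - residual deviance) / null deviance.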
rsq = summary.get('r.squared','NA')

try:
    rsq = r.round(float((null_deviance-residual_deviance)/null_deviance), digits=5)
    null_deviance = r.round(float(null_deviance), digits=5)
    residual_deviance = r.round(float(residual_deviance), digits=5)
except:
    pass

print >> fout, "Null deviance\t%s" % (null_deviance)
print >> fout, "Residual deviance\t%s" % (residual_deviance)
print >> fout, "pseudo R-squared\t%s" % (rsq)
print >> fout, "\n"
print >> fout, 'vif'

if novif == 0:
    py_vif = vif.as_py()
    count = 0
    for i in sorted(py_vif.keys()):
        print >> fout, 'c'+str(x_cols[count]+1), str(py_vif[i])
        count += 1
elif novif == 1:
    print >> fout, "VIF can be computed only when the model has more than one predictor"
```
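Usage sketch (the file names here are placeholders, not taken from the repository): the script expects a tab-delimited input file, the 1-based response column, a comma-separated list of 1-based predictor columns, and an output path, e.g. `python logistic_regression_vif.py input.tabular 4 1,2,3 output.tabular`. The response column must be coded 0/1, and running it requires Python 2 with the rpy bindings plus an R installation that provides the car package.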
