'''
Created on 31 dec. 2014

@author: lukas007
'''
import shutil
import subprocess
import csv
from collections import OrderedDict

def copy_dir(src, dst):
    '''Copy an entire directory tree from src to dst (dst must not exist yet).'''
    shutil.copytree(src, dst)

def log_message(log_file, log_message):
    '''Append a single message line to the given log file.'''
    with open(log_file, "a") as text_file:
        text_file.write(log_message + "\n")

def copy_file(src, dst):
    '''Copy a single file from src to dst.'''
    shutil.copy(src, dst)

def get_process_list():
    '''Return the output of `ps -A` as a list of lines, one per running process.'''
    p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)
    out, err = p.communicate()
    return out.splitlines()

def get_process_pid(process_name):
    '''
    Return the PID of the (last) process whose `ps -A` line contains process_name,
    or -1 if no such process is found.
    '''
    pid = -1
    for line in get_process_list():
        if process_name in line:
            pid = int(line.split(None, 1)[0])
    return pid


def get_as_dict(in_tsv):
    '''
    Generic method to parse a tab-separated file, returning a dictionary with one entry per named column.
    @param in_tsv: input filename to be parsed
    '''
    with open(in_tsv, 'rU') as tsv_file:
        data = list(csv.reader(tsv_file, delimiter='\t'))
    header = data.pop(0)
    # Create dictionary with column name as key and the list of column values as value
    output = {}
    for index in xrange(len(header)):
        output[header[index]] = [row[index] for row in data]
    return output

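# Illustrative example (file name and contents are made up): for a TSV file
# 'samples.tsv' containing
#     id    area
#     c1    1200
#     c2    340
# get_as_dict('samples.tsv') would return
#     {'id': ['c1', 'c2'], 'area': ['1200', '340']}
# (all values are kept as strings).
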
def save_dict_as_tsv(data, out_tsv):
    '''
    Writes tab-separated data to file.
    @param data: dictionary of column name -> list of column values (e.g. a merged dataset)
    @param out_tsv: output tsv file
    '''

    # Open output file for writing
    with open(out_tsv, 'wb') as out_file:
        output_writer = csv.writer(out_file, delimiter="\t")

        # Write headers
        output_writer.writerow(list(data.keys()))

        # Write one row per record; all column lists are assumed to have equal length
        for record_index in xrange(len(data[data.keys()[0]])):
            row = [data[k][record_index] for k in data]
            output_writer.writerow(row)


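# Illustrative example (hypothetical file name):
#     save_dict_as_tsv(OrderedDict([('id', ['c1', 'c2']), ('area', ['1200', '340'])]), 'out.tsv')
# would write a two-column, two-record tab-separated file with header "id<TAB>area".
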
def get_nist_out_as_dict(nist_result_file):
    '''
    Method to parse NIST specific output into a dictionary.
    @param nist_result_file: result file as produced by NIST nistms$.exe
    '''
    # Create dictionary with column name as key
    output = OrderedDict()
    output['id'] = []
    output['compound_name'] = []
    output['formula'] = []
    output['lib_name'] = []
    output['id_in_lib'] = []
    output['mf'] = []
    output['rmf'] = []
    output['prob'] = []
    output['cas'] = []
    output['mw'] = []


    for line in open(nist_result_file):
        row = line.split('<<')
        if row[0].startswith('Unknown'):
            title_row = row[0]
            continue
        elif row[0].startswith('Hit'):
            hit = row

            output['id'].append(title_row.split(': ')[1].split(' ')[0])
            output['compound_name'].append((hit[1].split('>>')[0]).decode('utf-8', errors='replace'))  # see http://blog.webforefront.com/archives/2011/02/python_ascii_co.html
            output['formula'].append(hit[2].split('>>')[0])
            output['lib_name'].append(hit[3].split('>>')[0])

            other_fields_list = (hit[2].split('>>')[1] + hit[3].split('>>')[1]).split(';')
            count = 0
            for field in other_fields_list:
                if field.startswith(' MF: '):
                    count += 1
                    output['mf'].append(field.split('MF: ')[1])
                elif field.startswith(' RMF: '):
                    count += 1
                    output['rmf'].append(field.split('RMF: ')[1])
                elif field.startswith(' Prob: '):
                    count += 1
                    output['prob'].append(field.split('Prob: ')[1])
                elif field.startswith(' CAS:'):
                    count += 1
                    output['cas'].append(field.split('CAS:')[1])
                elif field.startswith(' Mw: '):
                    count += 1
                    output['mw'].append(field.split('Mw: ')[1])
                elif field.startswith(' Id: '):
                    count += 1
                    output['id_in_lib'].append(field.split('Id: ')[1][0:-2])  # the [0:-2] is to avoid the last 2 characters, namely a '.' and a '\n'
                elif field != '' and field != ' Lib: ':
                    raise Exception('Error: unexpected field in NIST output: ' + field)

            if count != 6:
                raise Exception('Error: did not find all expected fields in NIST output')

    return output

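# Sketch of the structure this parser assumes (reconstructed from the parsing logic
# above, not taken from NIST documentation): a title line starting with
# 'Unknown: <id> ...', followed by one or more lines starting with 'Hit', where each
# hit line carries '<<compound>>', '<<formula>>' and '<<library>>' fields plus the
# ' MF: ', ' RMF: ', ' Prob: ', ' CAS:', ' Mw: ' and ' Id: ' values separated by ';'.
# The resulting dictionary holds one parallel list per column, e.g. output['mf'][3]
# and output['prob'][3] both describe the fourth parsed hit.
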
def get_spectra_file_as_dict(spectrum_file):
    '''
    Method to parse spectra file in NIST MSP input format into a dictionary.
    The idea is to parse the following:

    Name: spectrum1
    DB#: 1
    Num Peaks: 87
    14 8; 15 15; 27 18; 28 15; 29 15;
    30 11; 32 19; 39 32; 40 12; 41 68;

    into:

    dict['spectrum1'] = "14 8; 15 15; 27 18; 28 15; 29 15; 30 11; 32 19; 39 32; 40 12; 41 68;"

    @param spectrum_file: spectra file in MSP format (e.g. also the format returned by MsClust)
    '''

    output = OrderedDict()
    name = ''
    spectrum = ''
    for line in open(spectrum_file):
        if line.startswith('Name: '):
            if name != '':
                # store previous spectrum:
                output[name] = spectrum
            name = line.split('Name: ')[1].replace('\n', '')
            spectrum = ''
        elif line[0].isdigit():
            # append this peak-list line to the current spectrum:
            spectrum += line.replace('\n', '')

    # store also the last spectrum (if any was found):
    if name != '':
        output[name] = spectrum

    return output
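

# Minimal self-contained demo sketch (the file names and spectra below are made up
# for illustration; they are not part of the original tool): write a tiny MSP-style
# spectra file, parse it back with get_spectra_file_as_dict, and dump the result as
# TSV via save_dict_as_tsv.
if __name__ == '__main__':
    import os
    import tempfile

    demo_dir = tempfile.mkdtemp()
    msp_path = os.path.join(demo_dir, 'demo_spectra.msp')
    with open(msp_path, 'w') as msp_file:
        msp_file.write('Name: spectrum1\n'
                       'DB#: 1\n'
                       'Num Peaks: 4\n'
                       '14 8; 15 15;\n'
                       '27 18; 28 15;\n')

    spectra = get_spectra_file_as_dict(msp_path)
    print(spectra['spectrum1'])  # prints: 14 8; 15 15;27 18; 28 15;

    tsv_path = os.path.join(demo_dir, 'demo_spectra.tsv')
    save_dict_as_tsv(OrderedDict([('name', list(spectra.keys())),
                                  ('spectrum', list(spectra.values()))]),
                     tsv_path)
    print(open(tsv_path).read())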