0
|
1 #!/usr/bin/env python
|
|
2 #
|
|
3 # Data manager for downloading Plant Tribes scaffolds data.
|
|
4 import argparse
|
|
5 import json
|
|
6 import os
|
|
7 import shutil
|
|
8 import sys
|
|
9 import tarfile
|
|
10 import urllib2
|
|
11 import zipfile
|
|
12
|
|
13
|
|
# Galaxy tool-data table(s) that url_download() populates when the caller
# does not supply an explicit list of table names.
DEFAULT_DATA_TABLE_NAMES = ["plant_tribes_scaffolds"]
|
|
15
|
|
16
|
|
def add_data_table_entry(data_manager_dict, data_table_name, data_table_entry):
    """Append data_table_entry to the named table inside data_manager_dict.

    Missing intermediate containers ('data_tables' dict, per-table list) are
    created on demand.  The same dict that was passed in is returned.
    """
    tables = data_manager_dict.setdefault('data_tables', {})
    tables.setdefault(data_table_name, []).append(data_table_entry)
    return data_manager_dict
|
|
22
|
|
23
|
|
def make_directory(dir):
    """Create dir (including any missing parents); a pre-existing dir is a no-op."""
    if os.path.exists(dir):
        return
    os.makedirs(dir)
|
|
27
|
|
28
|
|
def remove_directory(dir):
    """Recursively delete dir and its contents; a missing dir is a no-op."""
    if not os.path.exists(dir):
        return
    shutil.rmtree(dir)
|
|
32
|
|
33
|
|
def url_download(target_directory, url, description, data_table_names=DEFAULT_DATA_TABLE_NAMES):
    """Download the archive at url, extract it into target_directory and
    build the Galaxy data-manager dict describing the extracted entries.

    Parameters:
        target_directory: directory that receives the extracted scaffold files.
        url: location of a tar or zip archive to fetch.
        description: free-text description stored with each data table entry.
        data_table_names: tool-data tables to populate (defaults to
            DEFAULT_DATA_TABLE_NAMES).

    Returns a dict of the form {'data_tables': {<name>: [<entry>, ...]}}, or
    an empty dict when the download fails or the payload is not a recognized
    archive.
    """
    # urllib2 was split into urllib.request in Python 3; support both so the
    # function runs under either interpreter.
    try:
        import urllib.request as url_lib
    except ImportError:
        import urllib2 as url_lib
    work_directory = os.path.abspath(os.path.join(os.getcwd(), 'scaffolds'))
    make_directory(work_directory)
    file_path = os.path.join(work_directory, os.path.basename(url))
    src = None
    dst = None
    try:
        src = url_lib.urlopen(url_lib.Request(url))
        dst = open(file_path, 'wb')
        while True:
            chunk = src.read(2 ** 10)
            if not chunk:
                break
            dst.write(chunk)
    except Exception as e:
        # Bug fix: the original fell through to tarfile.is_tarfile() after a
        # failed download, which raises on a missing/partial file.  Report the
        # error and return an empty (but valid) result instead.
        sys.stderr.write(str(e) + '\n')
        remove_directory(work_directory)
        return {}
    finally:
        if src:
            src.close()
        if dst:
            dst.close()
    if tarfile.is_tarfile(file_path):
        archive = tarfile.open(file_path, 'r:*')
    elif zipfile.is_zipfile(file_path):
        archive = zipfile.ZipFile(file_path, 'r')
    else:
        # Not an archive we can handle; return a valid empty dict rather than
        # None so the caller always serializes a JSON object.
        remove_directory(work_directory)
        return {}
    try:
        archive.extractall(work_directory)
    finally:
        # Bug fix: the archive handle was previously never closed.
        archive.close()
    os.remove(file_path)
    # Move the scaffolds data files into the defined output directory.
    for filename in os.listdir(work_directory):
        shutil.move(os.path.join(work_directory, filename), target_directory)
    remove_directory(work_directory)
    data_manager_dict = {}
    # Populate the data table(s); there should be a single entry in
    # target_directory (one directory per extracted scaffold bundle).
    for dir_entry in os.listdir(target_directory):
        full_path = os.path.abspath(os.path.join(target_directory, dir_entry))
        entry_name = os.path.basename(dir_entry)
        data_table_entry = dict(value=entry_name, name=entry_name, path=full_path, description=description)
        for data_table_name in data_table_names:
            data_manager_dict = add_data_table_entry(data_manager_dict, data_table_name, data_table_entry)
    return data_manager_dict
|
|
78
|
|
79
|
|
# Script entry point: parse arguments, locate the Galaxy-provided target
# directory, download the scaffolds data and emit the data-manager JSON.
parser = argparse.ArgumentParser()
parser.add_argument('--description', dest='description', default=None, help='Description')
parser.add_argument('--name', dest='name', help='Data table entry unique ID')
parser.add_argument('--out_file', dest='out_file', help='JSON output file')
parser.add_argument('--web_url', dest='web_url', help='Web URL')

args = parser.parse_args()

# Some magic happens with tools of type "manage_data" in that the output
# file contains some JSON data that allows us to define the target directory.
with open(args.out_file) as fh:
    params = json.load(fh)
target_directory = params['output_data'][0]['extra_files_path']
make_directory(target_directory)

description = '' if args.description is None else args.description.strip()

# Get the scaffolds data.
data_manager_dict = url_download(target_directory, args.web_url, description)
# Write the JSON output dataset.  Open in text mode ('w', not 'wb'): we are
# writing a str, and 'wb' + str breaks under Python 3.  'with' guarantees the
# handle is flushed and closed.
with open(args.out_file, 'w') as fh:
    fh.write(json.dumps(data_manager_dict))
|