env/lib/python3.7/site-packages/boto/manage/volume.py @ 0:26e78fe6e8c4 (draft)
commit: "planemo upload commit c699937486c35866861690329de38ec1a5d9f783"
author: shellac
date: Sat, 02 May 2020 07:14:21 -0400
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import print_function

from boto.sdb.db.model import Model
from boto.sdb.db.property import StringProperty, IntegerProperty, ListProperty, ReferenceProperty, CalculatedProperty
from boto.manage.server import Server
from boto.manage import propget
import boto.utils
import boto.ec2
import time
import traceback
from contextlib import closing
import datetime


class CommandLineGetter(object):

    def get_region(self, params):
        if not params.get('region', None):
            prop = self.cls.find_property('region_name')
            params['region'] = propget.get(prop, choices=boto.ec2.regions)

    def get_zone(self, params):
        if not params.get('zone', None):
            prop = StringProperty(name='zone', verbose_name='EC2 Availability Zone',
                                  choices=self.ec2.get_all_zones)
            params['zone'] = propget.get(prop)

    def get_name(self, params):
        if not params.get('name', None):
            prop = self.cls.find_property('name')
            params['name'] = propget.get(prop)

    def get_size(self, params):
        if not params.get('size', None):
            prop = IntegerProperty(name='size', verbose_name='Size (GB)')
            params['size'] = propget.get(prop)

    def get_mount_point(self, params):
        if not params.get('mount_point', None):
            prop = self.cls.find_property('mount_point')
            params['mount_point'] = propget.get(prop)

    def get_device(self, params):
        if not params.get('device', None):
            prop = self.cls.find_property('device')
            params['device'] = propget.get(prop)

    def get(self, cls, params):
        self.cls = cls
        self.get_region(params)
        self.ec2 = params['region'].connect()
        self.get_zone(params)
        self.get_name(params)
        self.get_size(params)
        self.get_mount_point(params)
        self.get_device(params)

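# A minimal sketch of driving CommandLineGetter by hand (normally it is
# invoked for you by Volume.create below); pre-seeded params are not
# prompted for, everything else is collected interactively via propget:
#
#   params = {'size': 50}
#   CommandLineGetter().get(Volume, params)
#   # params now also holds region, zone, name, mount_point and device
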
class Volume(Model):

    name = StringProperty(required=True, unique=True, verbose_name='Name')
    region_name = StringProperty(required=True, verbose_name='EC2 Region')
    zone_name = StringProperty(required=True, verbose_name='EC2 Zone')
    mount_point = StringProperty(verbose_name='Mount Point')
    device = StringProperty(verbose_name="Device Name", default='/dev/sdp')
    volume_id = StringProperty(required=True)
    past_volume_ids = ListProperty(item_type=str)
    server = ReferenceProperty(Server, collection_name='volumes',
                               verbose_name='Server Attached To')
    volume_state = CalculatedProperty(verbose_name="Volume State",
                                      calculated_type=str, use_method=True)
    attachment_state = CalculatedProperty(verbose_name="Attachment State",
                                          calculated_type=str, use_method=True)
    size = CalculatedProperty(verbose_name="Size (GB)",
                              calculated_type=int, use_method=True)

    @classmethod
    def create(cls, **params):
        getter = CommandLineGetter()
        getter.get(cls, params)
        region = params.get('region')
        ec2 = region.connect()
        zone = params.get('zone')
        size = params.get('size')
        ebs_volume = ec2.create_volume(size, zone.name)
        v = cls()
        v.ec2 = ec2
        v.volume_id = ebs_volume.id
        v.name = params.get('name')
        v.mount_point = params.get('mount_point')
        v.device = params.get('device')
        v.region_name = region.name
        v.zone_name = zone.name
        v.put()
        return v

    @classmethod
    def create_from_volume_id(cls, region_name, volume_id, name):
        vol = None
        ec2 = boto.ec2.connect_to_region(region_name)
        rs = ec2.get_all_volumes([volume_id])
        if len(rs) == 1:
            v = rs[0]
            vol = cls()
            vol.volume_id = v.id
            vol.name = name
            vol.region_name = v.region.name
            vol.zone_name = v.zone
            vol.put()
        return vol

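    # Example (hypothetical region and volume ID, for illustration only):
    #
    #   vol = Volume.create_from_volume_id('us-east-1', 'vol-12345678', 'data')
    #   # returns None when the volume ID cannot be found in that region
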
    def create_from_latest_snapshot(self, name, size=None):
        snapshot = self.get_snapshots()[-1]
        return self.create_from_snapshot(name, snapshot, size)

    def create_from_snapshot(self, name, snapshot, size=None):
        # Never create the new volume smaller than the current one (this also
        # guards against size=None, which cannot be compared on Python 3).
        if size is None or size < self.size:
            size = self.size
        ec2 = self.get_ec2_connection()
        if self.zone_name is None or self.zone_name == '':
            # deal with the migration case where the zone is not set in the
            # logical volume:
            current_volume = ec2.get_all_volumes([self.volume_id])[0]
            self.zone_name = current_volume.zone
        ebs_volume = ec2.create_volume(size, self.zone_name, snapshot)
        v = Volume()
        v.ec2 = ec2  # use the connection obtained above; self.ec2 may be unset
        v.volume_id = ebs_volume.id
        v.name = name
        v.mount_point = self.mount_point
        v.device = self.device
        v.region_name = self.region_name
        v.zone_name = self.zone_name
        v.put()
        return v

    def get_ec2_connection(self):
        if self.server:
            return self.server.ec2
        if not hasattr(self, 'ec2') or self.ec2 is None:
            self.ec2 = boto.ec2.connect_to_region(self.region_name)
        return self.ec2

    def _volume_state(self):
        ec2 = self.get_ec2_connection()
        rs = ec2.get_all_volumes([self.volume_id])
        return rs[0].volume_state()

    def _attachment_state(self):
        ec2 = self.get_ec2_connection()
        rs = ec2.get_all_volumes([self.volume_id])
        return rs[0].attachment_state()

    def _size(self):
        # ``self.__size`` is name-mangled to ``_Volume__size`` inside the
        # class body, so test for the mangled name or the cache never hits.
        if not hasattr(self, '_Volume__size'):
            ec2 = self.get_ec2_connection()
            rs = ec2.get_all_volumes([self.volume_id])
            self.__size = rs[0].size
        return self.__size

    def install_xfs(self):
        if self.server:
            self.server.install('xfsprogs xfsdump')

    def get_snapshots(self):
        """
        Returns a list of all completed snapshots for this volume ID,
        oldest first.
        """
        ec2 = self.get_ec2_connection()
        rs = ec2.get_all_snapshots()
        all_vols = [self.volume_id] + self.past_volume_ids
        snaps = []
        for snapshot in rs:
            if snapshot.volume_id in all_vols:
                if snapshot.progress == '100%':
                    snapshot.date = boto.utils.parse_ts(snapshot.start_time)
                    snapshot.keep = True
                    snaps.append(snapshot)
        # sort by date; list.sort(cmp=...) was removed in Python 3
        snaps.sort(key=lambda snap: snap.date)
        return snaps

    def attach(self, server=None):
        if self.attachment_state == 'attached':
            print('already attached')
            return None
        if server:
            self.server = server
            self.put()
        ec2 = self.get_ec2_connection()
        ec2.attach_volume(self.volume_id, self.server.instance_id, self.device)

    def detach(self, force=False):
        state = self.attachment_state
        if state == 'available' or state is None or state == 'detaching':
            print('already detached')
            return None
        ec2 = self.get_ec2_connection()
        ec2.detach_volume(self.volume_id, self.server.instance_id, self.device, force)
        self.server = None
        self.put()

    def checkfs(self, use_cmd=None):
        if self.server is None:
            raise ValueError('server attribute must be set to run this command')
        # determine the state of the file system on the volume; this only
        # works while the volume is attached
        if use_cmd:
            cmd = use_cmd
        else:
            cmd = self.server.get_cmdshell()
        status = cmd.run('xfs_check %s' % self.device)
        if not use_cmd:
            cmd.close()
        if status[1].startswith('bad superblock magic number 0'):
            return False
        return True

    def wait(self):
        if self.server is None:
            raise ValueError('server attribute must be set to run this command')
        with closing(self.server.get_cmdshell()) as cmd:
            # wait for the volume device to appear
            while not cmd.exists(self.device):
                boto.log.info('%s still does not exist, waiting 10 seconds' % self.device)
                time.sleep(10)

    def format(self):
        if self.server is None:
            raise ValueError('server attribute must be set to run this command')
        status = None
        with closing(self.server.get_cmdshell()) as cmd:
            if not self.checkfs(cmd):
                boto.log.info('make_fs...')
                status = cmd.run('mkfs -t xfs %s' % self.device)
        return status

    def mount(self):
        if self.server is None:
            raise ValueError('server attribute must be set to run this command')
        boto.log.info('handle_mount_point')
        with closing(self.server.get_cmdshell()) as cmd:
            if not cmd.isdir(self.mount_point):
                boto.log.info('making directory')
                # mount directory doesn't exist, so create it
                cmd.run("mkdir %s" % self.mount_point)
            else:
                boto.log.info('directory exists already')
                status = cmd.run('mount -l')
                lines = status[1].split('\n')
                for line in lines:
                    t = line.split()
                    if t and t[2] == self.mount_point:
                        # something is already mounted at the mount point;
                        # unmount that and mount it as /tmp
                        if t[0] != self.device:
                            cmd.run('umount %s' % self.mount_point)
                            cmd.run('mount %s /tmp' % t[0])
                            cmd.run('chmod 777 /tmp')
                        break
            # mount up our new EBS volume onto mount_point
            cmd.run("mount %s %s" % (self.device, self.mount_point))
            cmd.run('xfs_growfs %s' % self.mount_point)

    def make_ready(self, server):
        self.server = server
        self.put()
        self.install_xfs()
        self.attach()
        self.wait()
        self.format()
        self.mount()

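    # Typical provisioning flow, as a sketch (assumes ``server`` is an
    # existing boto.manage.server.Server instance):
    #
    #   vol = Volume.create(name='data', size=100)  # prompts for the rest
    #   vol.make_ready(server)  # attach, wait, format and mount in one call
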
    def freeze(self):
        if self.server:
            return self.server.run("/usr/sbin/xfs_freeze -f %s" % self.mount_point)

    def unfreeze(self):
        if self.server:
            return self.server.run("/usr/sbin/xfs_freeze -u %s" % self.mount_point)

    def snapshot(self):
        # if this volume is attached to a server
        # we need to freeze the XFS file system
        try:
            self.freeze()
            if self.server is None:
                snapshot = self.get_ec2_connection().create_snapshot(self.volume_id)
            else:
                snapshot = self.server.ec2.create_snapshot(self.volume_id)
            boto.log.info('Snapshot of Volume %s created: %s' % (self.name, snapshot))
        except Exception:
            boto.log.info('Snapshot error')
            boto.log.info(traceback.format_exc())
        finally:
            status = self.unfreeze()
            return status

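    # Consistency sketch: because snapshot() freezes the XFS file system
    # before calling create_snapshot, a snapshot of an attached, mounted
    # volume should be crash-consistent:
    #
    #   vol.snapshot()                   # freeze -> create_snapshot -> unfreeze
    #   vol.trim_snapshots(delete=True)  # prune old snapshots (see below)
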
    def get_snapshot_range(self, snaps, start_date=None, end_date=None):
        # filter snaps down to those whose date falls within the given bounds
        results = []
        for snap in snaps:
            if start_date and end_date:
                if snap.date >= start_date and snap.date <= end_date:
                    results.append(snap)
            elif start_date:
                if snap.date >= start_date:
                    results.append(snap)
            elif end_date:
                if snap.date <= end_date:
                    results.append(snap)
            else:
                results.append(snap)
        return results

    def trim_snapshots(self, delete=False):
        """
        Trim the set of snapshots for this volume by marking each snapshot's
        ``keep`` attribute. The oldest and the most recent snapshots are
        always kept.

        The schedule is: keep every snapshot from the current day, the first
        snapshot of each day for the previous seven days, the first snapshot
        of each week for the previous four weeks, and after that the first
        snapshot of each month. If ``delete`` is True, snapshots not marked
        ``keep`` are deleted.
        """
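        # Worked example of the schedule (illustrative numbers, not real
        # data): with one snapshot per day for the last 60 days, this keeps
        # today's snapshots, one per day for the last week, one per week for
        # the four weeks before that, one per earlier month, plus the oldest
        # and newest snapshots; everything else is marked keep=False.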
        snaps = self.get_snapshots()
        # Always keep the oldest and the newest
        if len(snaps) <= 2:
            return snaps
        snaps = snaps[1:-1]
        now = datetime.datetime.now(snaps[0].date.tzinfo)
        midnight = datetime.datetime(year=now.year, month=now.month,
                                     day=now.day, tzinfo=now.tzinfo)
        # Keep the first snapshot from each day of the previous week
        one_week = datetime.timedelta(days=7, seconds=60*60)
        previous_week = self.get_snapshot_range(snaps, midnight-one_week, midnight)
        if not previous_week:
            return snaps
        current_day = None
        for snap in previous_week:
            if current_day and current_day == snap.date.day:
                snap.keep = False
            else:
                current_day = snap.date.day
        # Get ourselves onto the next full week boundary
        if previous_week:
            week_boundary = previous_week[0].date
            if week_boundary.weekday() != 0:
                delta = datetime.timedelta(days=week_boundary.weekday())
                week_boundary = week_boundary - delta
        # Keep one within this partial week
        partial_week = self.get_snapshot_range(snaps, week_boundary, previous_week[0].date)
        if len(partial_week) > 1:
            for snap in partial_week[1:]:
                snap.keep = False
        # Keep the first snapshot of each week for the previous 4 weeks
        for i in range(0, 4):
            weeks_worth = self.get_snapshot_range(snaps, week_boundary-one_week, week_boundary)
            if len(weeks_worth) > 1:
                for snap in weeks_worth[1:]:
                    snap.keep = False
            week_boundary = week_boundary - one_week
        # Now look through all remaining snaps and keep one per month
        remainder = self.get_snapshot_range(snaps, end_date=week_boundary)
        current_month = None
        for snap in remainder:
            if current_month and current_month == snap.date.month:
                snap.keep = False
            else:
                current_month = snap.date.month
        if delete:
            for snap in snaps:
                if not snap.keep:
                    boto.log.info('Deleting %s(%s) for %s' % (snap, snap.date, self.name))
                    snap.delete()
        return snaps

    def grow(self, size):
        pass

    def copy(self, snapshot):
        pass

    def get_snapshot_from_date(self, date):
        pass

    def delete(self, delete_ebs_volume=False):
        if delete_ebs_volume:
            self.detach()
            ec2 = self.get_ec2_connection()
            ec2.delete_volume(self.volume_id)
        super(Volume, self).delete()

    def archive(self):
        # snapshot volume, trim snaps, delete volume-id
        pass