#!/usr/bin/python

"""
****************************** vdb_retrieval.py ******************************
 A VDBRetrieval() instance is called in two stages:
 1) by the tool's versioned_data.xml form (dynamic_option field)
 2) by its executable versioned_data.py script.

"""

import os, sys, glob, time
import string
from random import choice

from bioblend.galaxy import GalaxyInstance
from requests.exceptions import ChunkedEncodingError
from requests.exceptions import ConnectionError

import urllib2
import json
import vdb_common

# Store these values in python/galaxy environment variables?
VDB_DATA_LIBRARY = 'Versioned Data'
VDB_WORKFLOW_CACHE_FOLDER_NAME = 'Workflow cache'
VDB_CACHED_DATA_LABEL = 'Cached data'

# Don't forget to add "versioneddata@localhost.com" to the galaxy config admin_users list.

VDB_ADMIN_API_USER = 'versioneddata'
VDB_ADMIN_API_EMAIL = 'versioneddata@localhost.com'
VDB_ADMIN_API_KEY_PATH = os.path.join(os.path.dirname(sys._getframe().f_code.co_filename), 'versioneddata_api_key.txt')

# kipper, git, folder and other registered data store handlers
VDB_STORAGE_OPTIONS = 'kipper git folder biomaj'

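# A data store folder inside the 'Versioned Data' library is identified by a single
# specification ("pointer") file whose suffix names its storage handler, e.g.
# (the folder names below are illustrative only):
#
#   Versioned Data/ncbi_nt/pointer.kipper   -> handled by data_stores/vdb_kipper.py
#   Versioned Data/my_refs/pointer.folder   -> handled by data_stores/vdb_folder.py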
# Used in versioned_data_form.py
VDB_DATASET_NOT_AVAILABLE = 'This database is not currently available (no items).'
VDB_DATA_LIBRARY_FOLDER_ERROR = 'Error: this data library folder is not configured correctly.'
VDB_DATA_LIBRARY_CONFIG_ERROR = 'Error: Check folder config file: '


class VDBRetrieval(object):

	def __init__(self, api_key=None, api_url=None):
		"""
		This gets either trans.x.y from the <code file="..."> call in versioned_data.xml,
		or it gets a call with api_key and api_url from versioned_data.py.

		@param api_key string User's galaxy api key (read by the caller from a temporary file)
		@param api_url string contains http://[ip]:[port] for handling galaxy api calls.
		"""
		# Constants initialized during the life of a request:
		self.global_retrieval_date = None
		self.library_id = None
		self.history_id = None
		self.data_stores = []

		# Entire json library structure.  item.url, type=file|folder, id, name (library path)
		# Note: changes to the library during a request aren't reflected here.
		self.library = None

		self.user_api_key = None
		self.user_api = None
		self.admin_api_key = None
		self.admin_api = None
		self.api_url = None


	def set_trans(self, api_url, history_id, user_api_key=None): #master_api_key=None,
		"""
		Used only on initial presentation of the versioned_data.xml form.  Doesn't need admin_api.
		"""
		self.history_id = history_id
		self.api_url = api_url
		self.user_api_key = user_api_key
		#self.master_api_key = master_api_key

		self.set_user_api()
		self.set_admin_api()
		self.set_datastores()


	def set_api(self, api_info_path):
		"""
		"api_info_path" is provided only when the user submits the tool via the versioned_data.py call.
		It encodes both the api_url and the history_id of the current session.
		Only at this point do we need the admin_api, so it is looked up below.
		"""
		with open(api_info_path, 'r') as access:

			self.user_api_key = access.readline().strip()
			#self.master_api_key = access.readline().strip()
			api_info = access.readline().strip() #[api_url]-[history_id]
			self.api_url, self.history_id = api_info.rsplit('-', 1)

		self.set_user_api()
		self.set_admin_api()
		self.set_datastores()


	def set_user_api(self):
		"""
		Note: the error message is tacked on to self.data_stores for display back to the user.
		"""
		self.user_api = GalaxyInstance(url=self.api_url, key=self.user_api_key)

		if not self.user_api:
			self.data_stores.append({'name':'Error: user Galaxy API connection was not set up correctly.  Try getting another user API key.', 'id':'none'})
			return


	def set_datastores(self):
		"""
		Provides the list of data stores that users can select versions from.
		Note: error messages are tacked on to self.data_stores for display back to the user.
		"""
		# Look for the library called "Versioned Data"
		try:
			libs = self.user_api.libraries.get_libraries(name=VDB_DATA_LIBRARY, deleted=False)
		except Exception as err:
			# This is the first call to the api, so an api url or authentication error can happen here.
			self.data_stores.append({'name':'Error: Unable to make API connection: ' + err.message, 'id':'none'})
			return

		found = False
		for lib in libs:
			if lib['deleted'] == False:
				found = True
				self.library_id = lib['id']
				break

		if not found:
			self.data_stores.append({'name':'Error: Data Library [%s] needs to be set up by a galaxy administrator.' % VDB_DATA_LIBRARY, 'id':'none'})
			return

		try:

			if self.admin_api:
				self.library = self.admin_api.libraries.show_library(self.library_id, contents=True)
			else:
				self.library = self.user_api.libraries.show_library(self.library_id, contents=True)

		except Exception as err:
			# If data within the library is somehow messed up (maybe the user has no permissions?), this can generate a bioblend api error.
			if err.message[-21:] == 'HTTP status code: 403':
				self.data_stores.append({'name':'Error: [%s] library needs permissions adjusted so users can view it.' % VDB_DATA_LIBRARY, 'id':'none'})
			else:
				self.data_stores.append({'name':'Error: Unable to get [%s] library contents: %s' % (VDB_DATA_LIBRARY, err.message), 'id':'none'})
			return

		# Ensure the listing is sorted by name so that each version folder (whose name leads
		# with a version date/id) is followed immediately by the files it contains.
		self.library = sorted(self.library, key=lambda x: x['name'], reverse=False)

		# Gets the list of data stores.
		# For the given library_id (usually the "Versioned Data" library), retrieves the folder name
		# for any folder containing a data source specification file.  A folder should
		# have at most one of these.  It indicates the storage method used for the folder.

		for item in self.library:
			if item['type'] == "file" and self.test_data_store_type(item['name']):
				# Store the id of the specification file that points to the data source.
				self.data_stores.append({
					'name':os.path.dirname(item['name']),
					'id':item['id']
				})


	def set_admin_api(self):

		# Fetch the admin_api_key from disk, or regenerate the admin user account and api key from scratch.
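		# Expected layout of versioneddata_api_key.txt (written further below on first run; sketch only):
		#   line 1: <admin api key>
		#   line 2: <api_url>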
		if os.path.isfile(VDB_ADMIN_API_KEY_PATH):

			with open(VDB_ADMIN_API_KEY_PATH, 'r') as access:
				self.admin_api_key = access.readline().strip()
				self.api_url = access.readline().strip()

		else:
			# VERIFY THAT USER IS AN ADMIN
			user = self.user_api.users.get_current_user()
			if user['is_admin'] == False:
				print "Unable to establish the admin api: you need to be in the admin_users = ... list in the galaxy config."
				sys.exit(1)
			""" Future: will master API be able to do...
			#if not self.master_api_key:
			#	print "Unable to establish the admin api: no existing path to config file, and no master_api_key." + self.master_api_key
			#	sys.exit(1)
			# Generate from scratch:
			#master_api = GalaxyInstance(url=self.api_url, key=self.master_api_key)
			#users = master_api.users.get_users(deleted=False)
			"""
			users = self.user_api.users.get_users(deleted=False)
			for user in users:

				if user['email'] == VDB_ADMIN_API_EMAIL:
					self.admin_api_key = self.user_api.users.create_user_apikey(user['id'])

			if not self.admin_api_key:
				# Create the admin api access account with a dummy email address and a reliable but secure password:
				# NOTE: this will only be considered an admin account if it is listed in the galaxy config file as one.
				random_password = ''.join([choice(string.letters + string.digits) for i in range(15)])
				api_admin_user = self.user_api.users.create_local_user(VDB_ADMIN_API_USER, VDB_ADMIN_API_EMAIL, random_password)
				self.admin_api_key = self.user_api.users.create_user_apikey(api_admin_user['id'])

			with open(VDB_ADMIN_API_KEY_PATH, 'w') as access:
				access.write(self.admin_api_key + '\n' + self.api_url)

		self.admin_api = GalaxyInstance(url=self.api_url, key=self.admin_api_key)

		if not self.admin_api:
			print 'Error: admin Galaxy API connection was not set up correctly.  Admin user should be ' + VDB_ADMIN_API_EMAIL
			print "Unexpected error:", sys.exc_info()[0]
			sys.exit(1)


	def get_data_store_gateway(self, type, spec_file_id):
		# NOTE THAT PYTHON NEVER TIMES OUT FOR THESE CALLS - BUT IT WILL TIME OUT FOR API CALLS.
		# FUTURE: Adapt this so that any modules in the data_stores/ folder are usable
		# e.g. https://bbs.archlinux.org/viewtopic.php?id=109561
		# http://stackoverflow.com/questions/301134/dynamic-module-import-in-python

		# ****************** GIT ARCHIVE ****************
		if type == "git":
			import data_stores.vdb_git
			return data_stores.vdb_git.VDBGitDataStore(self, spec_file_id)

		# ****************** Kipper ARCHIVE ****************
		elif type == "kipper":
			import data_stores.vdb_kipper
			return data_stores.vdb_kipper.VDBKipperDataStore(self, spec_file_id)

		# ****************** FILE FOLDER ******************
		elif type == "folder":
			import data_stores.vdb_folder
			return data_stores.vdb_folder.VDBFolderDataStore(self, spec_file_id)

		# ****************** BIOMAJ FOLDER ******************
		elif type == "biomaj":
			import data_stores.vdb_biomaj
			return data_stores.vdb_biomaj.VDBBiomajDataStore(self, spec_file_id)

		else:
			print 'Error: %s not recognized as a valid data store type.' % type
			sys.exit( 1 )


	# For a file name ending in pointer.[git|kipper|folder|biomaj], returns that suffix.
	# Returns False if the name has no registered data store suffix.
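	# e.g. test_data_store_type('Versioned Data/ncbi_nt/pointer.kipper') returns 'kipper',
	# while a name without a registered suffix returns False (the path above is illustrative only).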
	def test_data_store_type(self, file_name, file_path=None):
		if file_path and not os.path.isfile(file_path):
			return False

		suffix = file_name.rsplit('.',1)
		if len(suffix) > 1 and suffix[1] in VDB_STORAGE_OPTIONS:
			return suffix[1]

		return False


	def get_library_data_store_list(self):
		"""
		For display on the tool form, returns the names and ids of the specification files that point to data sources.

		@return dirs array of [folder_label, folder_id, selected] entries
		"""
		dirs = []
		# self.data_stores was built from the recursive library contents (files and folders).
		for item in self.data_stores:
			dirs.append([item['name'], item['id'], False])

		return dirs


	def get_library_label_path(self, spec_file_id):
		for item in self.data_stores:
			if item['id'] == spec_file_id:
				return item['name']

		return None


	def get_library_folder_datasets(self, library_version_path, admin=False):
		"""
		Gets the set of ALL dataset FILES within a folder - INCLUDING SUBFOLDERS - by searching
		through the library, examining each item's full hierarchic label.
		BUT CURRENTLY: If any file has state='error' the whole list is rejected (and regenerated).

		WISHLIST: HAVE AN API FUNCTION TO GET ONLY A GIVEN FOLDER'S (BY ID) CONTENTS!

		@param library_version_path string Full hierarchic label of a library file or folder.

		@return array of ldda_id library dataset data association ids.
		"""
		if admin:
			api_handle = self.admin_api
		else:
			api_handle = self.user_api

		count = 0
		while count < 4:
			try:
				items = api_handle.libraries.show_library(self.library_id, True)
				break
			except ChunkedEncodingError:
				print "Error: Trying to fetch Versioned Data library listing. Try [" + str(count) + "]"
				time.sleep(2)

			count += 1

		datasets = []
		libvpath_len = len(library_version_path) + 1
		for item in items:
			if item['type'] == "file":
				name = item['name']
				# The trailing slash is needed, or else this would match similar prefixes.
				if name[0:libvpath_len] == library_version_path + '/':

					# ISSUE: input library datasets can be queued / running, and this MUST wait until they are finished or it will plow ahead prematurely.

					count = 0

					while count < 10:

						try:
							lib_dataset = api_handle.libraries.show_dataset(self.library_id, item['id'])

						except:
							print "Unexpected error:", sys.exc_info()[0]
							sys.exit(1)

						if lib_dataset['state'] == 'running':
							time.sleep(10)
							count += 1
							continue

						elif lib_dataset['state'] == 'queued':

							# FUTURE: Check date.  If it is really stale it should be killed?
							print 'Note: library folder dataset item "%s" is [%s].  Please wait until it is finished processing, or have a galaxy administrator delete the dataset if its creation has failed.' % (name, lib_dataset['state'])
							sys.exit(1)

						elif lib_dataset['state'] != 'ok' or not os.path.isfile(lib_dataset['file_name']):
							print 'Note: library folder dataset "%s" had an error during its job.  Its state was [%s]. Regenerating.' % (name, lib_dataset['state'])
							self.admin_api.libraries.delete_library_dataset(self.library_id, item['id'], purged=True)
							return []

						else:
							break

					datasets.append(item['id'])


		return datasets


	def get_library_version_datasets(self, library_version_path, base_folder_id='', version_label='', version_path=''):
		"""
		Check if the given library has a folder for the given version_path.  If so:
		 - and it has content, return its datasets.
		 - otherwise refetch content for the version folder.
		If there is no folder, populate the version folder with data from the archive and return those datasets.
		The version exists in the external cache (or, in the case of an unlinked folder, in an EXISTING galaxy library folder).
		Don't call this unless the version_path contents have been established.

		@param library_version_path string Full hierarchic label of a library file or folder with version id.

		For creation:
		@param base_folder_id string a library folder id under which version files should exist
		@param version_label string Label to give the newly created galaxy library version folder
		@param version_path string Data source folder to retrieve versioned data files from

		@return array of dataset ids
		"""
		# Pick the first folder of any that match the given 'Versioned Data/.../.../[version id]' path.
		# This case will always match the 'folder' data store:

		folder_matches = self.get_folders(name=library_version_path)

		if len(folder_matches):

			folder_id = folder_matches[0]['id']
			dataset_ids = self.get_library_folder_datasets(library_version_path)

			if len(dataset_ids) > 0:

				return dataset_ids

			if os.listdir(version_path) == []:
				# version_path doesn't exist for 'folder' data store versions that are datasets directly in the library (i.e. not linked)
				print "Error: the data store didn't return any content for the given version id.  Looked in: " + version_path
				sys.exit(1)

			# NOTE ONE 3rd PARTY COMMENT THAT ONE SHOULD PUT IN file_type='fasta' FOR LARGE FILES.  Problem with that is that galaxy then can't recognize other data types.
			library_folder_datasets = self.admin_api.libraries.upload_from_galaxy_filesystem(self.library_id, version_path, folder_id, link_data_only=True, roles=None)

		else:
			if base_folder_id == '': # Normally shouldn't happen

				print "Error: no match to the given version folder for [" + library_version_path + "] but unable to create one - missing parent folder identifier"
				return []

			# Provide the archive folder with a datestamped name and version (folderNew has url, id, name):
			folderNew = self.admin_api.libraries.create_folder(self.library_id, version_label, description=VDB_CACHED_DATA_LABEL, base_folder_id=base_folder_id)
			folder_id = str(folderNew[0]['id'])

			# Now link the results to a suitably named galaxy library dataset.
			# Note, this command links to EVERY file/folder in the version_folder source.
			# Also, Galaxy will strip off .gz suffixes - WITHOUT UNCOMPRESSING FILES!
			# So, best to prevent the data store from showing .gz files in the first place.
			try:
				library_folder_datasets = self.admin_api.libraries.upload_from_galaxy_filesystem(self.library_id, version_path, folder_id, link_data_only=True, roles=None)

			except:
				# An error is raised if the version_path folder is empty, or if kipper is unable to create a folder or db due to permissions etc.
				print "Error: a permission or other error was encountered when trying to retrieve version data for version folder [" + version_path + "]: Is [%s] listed in the galaxy config admin_users list?" % VDB_ADMIN_API_EMAIL, sys.exc_info()[0]
				sys.exit(1)


		library_dataset_ids = [dataset['id'] for dataset in library_folder_datasets]

		# THIS LOOP WAITS UNTIL THESE DATASETS ARE UPLOADED.
		# They still take time even for linked big data, probably because they are read for metadata.
		# Not nice that the user doesn't see the process as soon as it starts, but timeout possibilities
		# later on down the line are more difficult to manage.
		for dataset_id in library_dataset_ids:
			# ten seconds x 60 = 10 minutes; should be longer?
			for count in range(60):
				try:
					lib_dataset = self.admin_api.libraries.show_dataset(self.library_id, dataset_id)

				except:
					print "Unexpected error:", sys.exc_info()[0]
					continue

				if lib_dataset['state'] in 'running queued':
					time.sleep(10)
					continue
				else:
					# Possibly in a nice "ok" or not nice state here.
					break


		return library_dataset_ids


	def get_folders(self, name):
		"""
		ISSUE: Have run into this sporadic error with a number of bioblend api calls, so api calls may need to be wrapped in a retry mechanism:
		File "/usr/lib/python2.6/site-packages/requests/models.py", line 656, in generate
		raise ChunkedEncodingError(e)
		requests.exceptions.ChunkedEncodingError: ('Connection broken: IncompleteRead(475 bytes read)', IncompleteRead(475 bytes read))
		"""
		for count in range(3):
			try:
				return self.user_api.libraries.get_folders(self.library_id, name=name)

			except:
				print 'Try (%s) to fetch library folders for "%s"' % (str(count), name)
				print sys.exc_info()[0]
				time.sleep(5)

		print "Failed after (%s) tries!" % (str(count + 1))
		return None


	def get_library_folder(self, library_path, relative_path, relative_labels):
		"""
		Check if the given library has a folder that looks like library_path + relative_path.
		If not, create it, and return the resulting id.  Used for cache creation.
		Ignores a bad library_path.

		@param library_path string Full hierarchic label of a library folder.  NOTE: Library_path must have a leading forward slash for a match, i.e. /derivative_path
		@param relative_path string branch of the folder tree stemming from library_path
		@param relative_labels string label for each relative_path item

		@return folder_id
		"""
		created = False
		root_match = self.get_folders(name=library_path)

		if len(root_match):
			base_folder_id = root_match[0]['id']

			relative_path_array = relative_path.split('/')
			relative_labels_array = relative_labels.split('/')

			for ptr in range(len(relative_path_array)):

				_library_path = os.path.join(library_path, '/'.join(relative_path_array[0:ptr+1]))
				folder_matches = self.get_folders(name=_library_path)

				if len(folder_matches):
					folder_id = folder_matches[0]['id']
				else:
					dataset_key = relative_path_array[ptr]
					label = relative_labels_array[ptr]
					folder_new = self.admin_api.libraries.create_folder(self.library_id, dataset_key, description=label, base_folder_id=base_folder_id)
					folder_id = str(folder_new[0]['id'])

				base_folder_id = folder_id

			return folder_id

		return None


	def get_library_folders(self, library_label_path):
		"""
		Gets the set of ALL folders within the given library path.  Within each folder, lists its files as well.
		Folders are ordered by version date/id, most recent first (natural sort).

		NOT quite recursive: nested folders don't have parent info.

		@param library_label_path string Full hierarchic label of a library folder. Inside it are version subfolders, their datasets, and the pointer file.

		@return array of ids of the version subfolders and also their dataset content ids
		"""
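		# Return shape sketch (ids and names below are illustrative only):
		#   [{'id': 'F1', 'name': '<label path>/2016-01-01_v5',
		#     'files': [{'id': 'D1', 'name': '<label path>/2016-01-01_v5/data.fasta'}]}, ...]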
		folders = []
		libvpath_len = len(library_label_path)
		for item in self.library:

			name = item['name']
			if name[0:libvpath_len] == library_label_path:

				# Skip any file that is immediately under library_label_path
				if item['type'] == 'file':
					file_key_val = item['name'].rsplit('/',1)
					#file_name_parts = file_key_val[1].split('.')
					if file_key_val[0] == library_label_path:
					#and len(file_name_parts) > 1 \
					#and file_name_parts[1] in VDB_STORAGE_OPTIONS:
						continue

				if item['type'] == 'folder':
					folders.append({'id':item['id'], 'name':item['name'], 'files':[]})

				else:
					# Items should be sorted ascending such that each item is contained in the previous folder.
					folders[-1]['files'].append({'id':item['id'], 'name':item['name']})

		return folders


	def get_workflow_data(self, workflow_list, datasets, version_id):
		"""
		Run each workflow in turn, given the datasets generated above.
		See if each workflow's output has been cached.
		If not, run the workflow and re-establish its output data.
		The complexity is that the cache could be:
		1) in the user's history.
		2) in a library data folder called "derivative_cache" under the data source folder (as created by this galaxy install)
		3) in an external data folder ..."/derivative_cache" (as created by this galaxy install)
			BUT other galaxy installs can't really use this unless they know the metadata on the workflow that generated it.
			In future we'll design a system for different galaxies to be able to read metadata to determine if they can use the cached workflow data here.

		ISSUE below: Unless it is a really short workflow, run_workflow() returns before the work is actually complete.  DO WE NEED TO DELAY UNTIL EVERY SINGLE OUTPUT DATASET IS "ok", not just "queued" or "running"?  OR IS THE SERVER-TO-LIBRARY UPLOAD PAUSE ABOVE ENOUGH?

		Note, workflow_list contains only ids for items beginning with "versioning: "
		FUTURE IMPROVEMENT: LOCK WORKFLOW: VULNERABILITY: IF WORKFLOW CHANGES, THAT AFFECTS REPRODUCIBILITY.

		FUTURE: NEED TO ENSURE EACH dataset id not found in history is retrieved from cache.
		FUTURE: Check to see that EVERY SINGLE workflow output
		has a corresponding dataset_id in history or library,
		i.e. len(workflow['outputs']) == len(history_dataset_ids)
		But do we know before execution how many outputs there are (given conditional outputs)?

		@param workflow_list string space-separated list of workflow ids
		@param datasets: an array of correct data source versioned datasets that are inputs to tools and workflows
		@param version_id string version identifier used to label the resulting datasets
		"""
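		# Cache location sketch: each workflow's outputs are linked into the library under
		#   /Workflow cache/<workflow_id>/<input ldda_id_1>_<input ldda_id_2>...
		# (see library_cache_path below; the ids shown are illustrative).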
		for workflow_id in workflow_list.split():

			workflows = self.admin_api.workflows.get_workflows(workflow_id, published=True)

			if not len(workflows):
				# An error occurs if the admin_api user doesn't have permissions on this workflow.
				# Currently all workflows have to be shared with VDB_ADMIN_API_EMAIL.
				# Future: could get around this by using publicly shared workflows via "import_shared_workflow(workflow_id)"
				print 'Error: unable to run workflow - has it been shared with the Versioned Data tool user email address "%s" ?' % VDB_ADMIN_API_EMAIL
				sys.exit(1)

			for workflow_summary in workflows:

				workflow = self.admin_api.workflows.show_workflow(workflow_id)
				print 'Doing workflow: "' + workflow_summary['name'] + '"'

				if len(workflow['inputs']) == 0:
					print "ERROR: This workflow is not configured correctly - it needs at least 1 input dataset step."

				# FUTURE: Bring greater intelligence to assigning inputs to the workflow?!!!
				if len(datasets) < len(workflow['inputs']):

					print 'Error: workflow requires more inputs (%s) than are available in retrieved datasets (%s) for this version of retrieved data.' % (len(workflow['inputs']), len(datasets))
					sys.exit(1)

				codings = self.get_codings(workflow, datasets)
				(workflow_input_key, workflow_input_label, annotation_key, dataset_map) = codings

				history_dataset_ids = self.get_history_workflow_results(annotation_key)

				if not history_dataset_ids:

					library_cache_path = os.path.join("/", VDB_WORKFLOW_CACHE_FOLDER_NAME, workflow_id, workflow_input_key)

					# This has to be a privileged api admin fetch.
					library_dataset_ids = self.get_library_folder_datasets(library_cache_path, admin=True)

					if not len(library_dataset_ids):
						# No cache in the library, so run the workflow.

						# Create an admin_api history
						admin_history = self.admin_api.histories.create_history()
						admin_history_id = admin_history['id']

						# If you try to run a workflow that hasn't been shared with you, it seems to go a bit berserk.
						work_result = self.admin_api.workflows.run_workflow(workflow_id, dataset_map=dataset_map, history_id=admin_history_id)

						# Then copy (link) the results back to the library so they can be matched in future.
						self.cache_datasets(library_cache_path, work_result, workflow_summary, codings, version_id, admin_history_id)

						# Now return the new cached library dataset ids:
						library_dataset_ids = self.get_library_folder_datasets(library_cache_path, admin=True)
						""" If a dataset is purged, it's purged everywhere... so don't purge!  Let the caching system do that.
						THIS APPEARS TO HAPPEN TOO QUICKLY FOR LARGE DATABASES; LEAVE IT TO THE CACHING MECHANISM TO CLEAR.  OR THE ABOVE FIX TO WAIT UNTIL THE DATASET IS OK.
						self.admin_api.histories.delete_history(admin_history_id, purge=False)
						"""

					# Now link the library cache workflow results to the history and add the key there for a future match.
					self.update_history(library_dataset_ids, annotation_key, version_id)


	def update_history(self, library_dataset_ids, annotation, version_id):
		"""
		Copy datasets from the library over to the current history if they aren't already there.
		Must cycle through history datasets, looking for the "copied_from_ldda_id" value.  This is available only with the details view.

		@param library_dataset_ids array List of dataset ids to copy from the library folder
		@param annotation string annotation to add (e.g. path of the original version folder added as an annotation)
		@param version_id string Label to add to the copied dataset in the user's history
		"""
		history_datasets = self.user_api.histories.show_history(self.history_id, contents=True, deleted=False, visible=True, details='all', types=None)

		datasets = []
		for dataset_id in library_dataset_ids:
			# USING ADMIN_API because that's the only way to get workflow items back... user_api doesn't necessarily have view rights on newly created workflow items.  Only versioneddata@localhost.com has perms.
			ld_dataset = self.admin_api.libraries.show_dataset(self.library_id, dataset_id)

			if not ld_dataset['state'] in 'ok running queued':

				print "Error when linking to library dataset cache [" + ld_dataset['name'] + ", " + ld_dataset['id'] + "] - it isn't in a good state: " + ld_dataset['state']
				sys.exit(1)

			if not os.path.isfile(ld_dataset['file_name']):
				pass
				#FUTURE: SHOULD TRIGGER LIBRARY REGENERATION OF ITEM?

			library_ldda_id = ld_dataset['ldda_id']

			# Find out if the library dataset item is already in the history, and if so, just use that item.
			dataset = None
			for dataset2 in history_datasets:

				if 'copied_from_ldda_id' in dataset2 \
				and dataset2['copied_from_ldda_id'] == library_ldda_id \
				and dataset2['state'] in 'ok running' \
				and dataset2['accessible'] == True:
					dataset = dataset2
					break

			if not dataset: # link in the given dataset from the library

				dataset = self.user_api.histories.upload_dataset_from_library(self.history_id, dataset_id)

				# Update the dataset's label - not necessary, just hinting at its creation.
				new_name = dataset['name']
				if dataset['name'][-len(version_id):] != version_id:
					new_name += ' ' + version_id

				self.user_api.histories.update_dataset(self.history_id, dataset['id'], name=new_name, annotation = annotation)

			datasets.append({
				'id': dataset['id'],
				'ld_id': ld_dataset['id'],
				'name': dataset['name'],
				'ldda_id': library_ldda_id,
				'library_dataset_name': ld_dataset['name'],
				'state': ld_dataset['state']
			})

		return datasets


	def get_codings(self, workflow, datasets):
		"""
		Returns a number of coded lists or arrays for use in caching or displaying workflow results.
		Note: workflow['inputs'] = {u'23': {u'label': u'Input Dataset', u'value': u''}},
		Note: step_id is not incremental.
		Note: VERY COMPLICATED because of hda/ldda/ld ids

		FUTURE: IS METADATA AVAILABLE TO BETTER MATCH WORKFLOW INPUTS TO DATA SOURCE RECALL VERSIONS?
		ISSUE: IT IS ASSUMED ALL INPUTS TO THE WORKFLOW ARE AVAILABLE AS DATASETS BY ID IN THE LIBRARY.  I.e.
		one can't have a workflow that also makes reference to another just-generated file in the user's
		history.
		"""
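		# Rough shape of the returned tuple, for two inputs (the ids are illustrative only):
		#   ('ldda1_ldda2', 'nt v1, nr v1', '<workflow_id>:ldda1_ldda2',
		#    {'23': {'src': 'ld', 'id': 'ld1'}, '24': {'src': 'ld', 'id': 'ld2'}})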
		db_ptr = 0
		dataset_map = {}
		workflow_input_key = []
		workflow_input_labels = []

		for step_id, ds_in in workflow['inputs'].iteritems():
			input_dataset_id = datasets[db_ptr]['ld_id']
			ldda_id = datasets[db_ptr]['ldda_id']
			dataset_map[step_id] = {'src': 'ld', 'id': input_dataset_id}
			workflow_input_key.append(ldda_id) # like dataset_index but from the workflow input perspective
			workflow_input_labels.append(datasets[db_ptr]['name'])
			db_ptr += 1

		workflow_input_key = '_'.join(workflow_input_key)
		workflow_input_labels = ', '.join(workflow_input_labels)
		annotation_key = workflow['id'] + ":" + workflow_input_key

		return (workflow_input_key, workflow_input_labels, annotation_key, dataset_map)


	def get_history_workflow_results(self, annotation):
		"""
		See if a workflow-generated dataset exists in the user's history.  The only way to spot this
		is to find some dataset in the user's history that has the workflow_id in its "annotation" field.
		We added the specific dataset ids that were used as input to the workflow as well as the
		workflow key, since the same workflow could have been run on different inputs.

		@param annotation string Contains the workflow id and input dataset ids.
		"""
		history_datasets = self.user_api.histories.show_history(self.history_id, contents=True, deleted=False, visible=True, details='all')
		dataset_ids = []

		for dataset in history_datasets:
			if dataset['annotation'] == annotation:
				if dataset['accessible'] == True and dataset['state'] == 'ok':
					dataset_ids.append(dataset['id'])
				else:
					print "Warning: dataset " + dataset['name'] + " is in an error state [" + dataset['state'] + "] so it was skipped!"

		return dataset_ids


	def cache_datasets(self, library_cache_path, work_result, workflow_summary, codings, version_id, history_id):
		"""
		Use the Galaxy API to LINK the versioned data api admin user's history workflow-created item(s) into the appropriate Versioned Data Workflow Cache folder.  Doing this via an API call so that metadata is preserved, e.g. preserving that it is a product of makeblastdb/formatdb and all that entails.  Only then does Galaxy remain knowledgeable about the datatype/data collection.

		Then the user gets a link to the workflow dataset in their history.  (If a galaxy user deletes a workflow dataset in their history they actually only delete their history link to that dataset. True of the api admin user?)

		FUTURE: have the galaxy-created data shared from a server location?
		"""

		(workflow_input_key, workflow_input_label, annotation_key, dataset_map) = codings

		# This will create the folder if it doesn't exist:
		_library_cache_labels = os.path.join("/", VDB_WORKFLOW_CACHE_FOLDER_NAME, workflow_summary['name'], 'On ' + workflow_input_label)
		folder_id = self.get_library_folder("/", library_cache_path, _library_cache_labels)
		if not folder_id: # Case should never happen
			print 'Error: unable to determine the library folder to place the cache in: ' + library_cache_path
			sys.exit(1)

		for dataset_id in work_result['outputs']:
			# We have to mark each dataset entry with the Workflow ID and the input datasets it was generated by.
			# No other way to know they are associated. ADD VERSION ID TO END OF workflow_input_label?
			label = workflow_summary['name'] + ' on ' + workflow_input_label

			# THIS WILL BE IN THE ADMIN API HISTORY
			self.admin_api.histories.update_dataset(history_id, dataset_id, annotation = annotation_key, name=label)

			# Copy dataset_id into the library cache folder and give it the 'cached data' description.
			if 'copy_from_dataset' in dir(self.admin_api.libraries):
				# IN BIOBLEND LATEST:
				self.admin_api.libraries.copy_from_dataset(self.library_id, dataset_id, folder_id, VDB_CACHED_DATA_LABEL + ": version " + version_id)
			else:
				self.library_cache_setup_privileged(folder_id, dataset_id, VDB_CACHED_DATA_LABEL + ": version " + version_id)


	def library_cache_setup_privileged(self, folder_id, dataset_id, message):
		"""
		Copy a history HDA into a library LDDA (that the current admin api user has add permissions on)
		in the given library and library folder.  Requires that dataset_id has been created by the admin_api_key user.  Nicola Soranzo [nicola.soranzo@gmail.com] will be adding this to BIOBLEND eventually.

		We tried linking a Versioned Data library Workflow Cache folder to the dataset(s) a non-admin api user has just generated.  It turns out the API user that connects the two must be both a Library admin AND the owner of the history dataset being uploaded, or an error occurs.  So the system can't do the action on behalf of a non-library-privileged user.  A second complication with that approach is that there is no Bioblend API call - one must do this directly in the galaxy API via a direct URL fetch.

		NOTE: This will raise "HTTPError(req.get_full_url(), code, msg, hdrs, fp)" if given an empty folder_id, for example.

		@see def copy_hda_to_ldda( library_id, library_folder_id, hda_id, message='' ):
		@see https://wiki.galaxyproject.org/Events/GCC2013/TrainingDay/API?action=AttachFile&do=view&target=lddas_1.py

		@uses library_id: the id of the library which we want to query.

		@param dataset_id: the id of the user's history dataset we want to copy into the library folder.
		@param folder_id: the id of the library folder to copy into.
		@param message: an optional message to add to the new LDDA.
		"""
		full_url = self.api_url + '/libraries' + '/' + self.library_id + '/contents'
		url = self.make_url( self.admin_api_key, full_url )

		post_data = {
			'folder_id'     : folder_id,
			'create_type'   : 'file',
			'from_hda_id'   : dataset_id,
			'ldda_message'  : message
		}

		req = urllib2.Request( url, headers = { 'Content-Type': 'application/json' }, data = json.dumps( post_data ) )

		results = json.loads( urllib2.urlopen( req ).read() )
		return


	# Expecting to phase this out with a bioblend api call replacing library_cache_setup_privileged().
	def make_url(self, api_key, url, args=None):
		# Adds the API Key to the URL if it's not already there.
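		# e.g. make_url('abc123', 'http://localhost:8080/api/libraries/xyz/contents')
		#   -> 'http://localhost:8080/api/libraries/xyz/contents?key=abc123'  (illustrative values)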
		if args is None:
			args = []
		argsep = '&'
		if '?' not in url:
			argsep = '?'
		if '?key=' not in url and '&key=' not in url:
			args.insert( 0, ( 'key', api_key ) )
		return url + argsep + '&'.join( [ '='.join( t ) for t in args ] )