Mercurial repository: shellac / guppy_basecaller
file: env/lib/python3.7/site-packages/boto/machinelearning/layer1.py @ 2:6af9afd405e9 (draft)
commit message: "planemo upload commit 0a63dd5f4d38a1f6944587f52a8cd79874177fc1"
author: shellac
date: Thu, 14 May 2020 14:56:58 -0400
parent: 26e78fe6e8c4
# Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#

import boto
from boto.compat import json, urlsplit
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.machinelearning import exceptions


class MachineLearningConnection(AWSQueryConnection):
    """
    Definition of the public APIs exposed by Amazon Machine Learning
    """
    APIVersion = "2014-12-12"
    AuthServiceName = 'machinelearning'
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "machinelearning.us-east-1.amazonaws.com"
    ServiceName = "MachineLearning"
    TargetPrefix = "AmazonML_20141212"
    ResponseError = JSONResponseError

    _faults = {
        "InternalServerException": exceptions.InternalServerException,
        "LimitExceededException": exceptions.LimitExceededException,
        "ResourceNotFoundException": exceptions.ResourceNotFoundException,
        "IdempotentParameterMismatchException": exceptions.IdempotentParameterMismatchException,
        "PredictorNotMountedException": exceptions.PredictorNotMountedException,
        "InvalidInputException": exceptions.InvalidInputException,
    }

    def __init__(self, **kwargs):
        region = kwargs.pop('region', None)
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)

        if 'host' not in kwargs or kwargs['host'] is None:
            kwargs['host'] = region.endpoint

        super(MachineLearningConnection, self).__init__(**kwargs)
        self.region = region
        self.auth_region_name = self.region.name

    def _required_auth_capability(self):
        return ['hmac-v4']

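    # Usage sketch (illustrative, not part of boto): constructing the
    # connection. This assumes AWS credentials are already available to boto
    # (for example via environment variables or ~/.boto); the region name is
    # just the default shown above.
    #
    #   import boto.machinelearning
    #   from boto.machinelearning.layer1 import MachineLearningConnection
    #
    #   # Direct construction against the default us-east-1 endpoint:
    #   conn = MachineLearningConnection()
    #
    #   # Or, via the boto-standard helper shipped with this module:
    #   conn = boto.machinelearning.connect_to_region('us-east-1')
    #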
    def create_batch_prediction(self, batch_prediction_id, ml_model_id,
                                batch_prediction_data_source_id, output_uri,
                                batch_prediction_name=None):
        """
        Generates predictions for a group of observations. The
        observations to process exist in one or more data files
        referenced by a `DataSource`. This operation creates a new
        `BatchPrediction`, and uses an `MLModel` and the data files
        referenced by the `DataSource` as information sources.

        `CreateBatchPrediction` is an asynchronous operation. In
        response to `CreateBatchPrediction`, Amazon Machine Learning
        (Amazon ML) immediately returns and sets the `BatchPrediction`
        status to `PENDING`. After the `BatchPrediction` completes,
        Amazon ML sets the status to `COMPLETED`.

        You can poll for status updates by using the
        GetBatchPrediction operation and checking the `Status`
        parameter of the result. After the `COMPLETED` status appears,
        the results are available in the location specified by the
        `OutputUri` parameter.

        :type batch_prediction_id: string
        :param batch_prediction_id: A user-supplied ID that uniquely
            identifies the `BatchPrediction`.

        :type batch_prediction_name: string
        :param batch_prediction_name: A user-supplied name or description of
            the `BatchPrediction`. `BatchPredictionName` can only use the
            UTF-8 character set.

        :type ml_model_id: string
        :param ml_model_id: The ID of the `MLModel` that will generate
            predictions for the group of observations.

        :type batch_prediction_data_source_id: string
        :param batch_prediction_data_source_id: The ID of the `DataSource`
            that points to the group of observations to predict.

        :type output_uri: string
        :param output_uri: The location of an Amazon Simple Storage Service
            (Amazon S3) bucket or directory to store the batch prediction
            results. The following substrings are not allowed in the S3 key
            portion of the "outputURI" field: ':', '//', '/./', '/../'.
            Amazon ML needs permissions to store and retrieve the logs on
            your behalf. For information about how to set permissions, see
            the `Amazon Machine Learning Developer Guide`_.

        """
        params = {
            'BatchPredictionId': batch_prediction_id,
            'MLModelId': ml_model_id,
            'BatchPredictionDataSourceId': batch_prediction_data_source_id,
            'OutputUri': output_uri,
        }
        if batch_prediction_name is not None:
            params['BatchPredictionName'] = batch_prediction_name
        return self.make_request(action='CreateBatchPrediction',
                                 body=json.dumps(params))

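    # Usage sketch (illustrative, not part of boto): start a batch prediction
    # and poll GetBatchPrediction until it leaves the pending/in-progress
    # states, as the docstring above describes. All IDs and the S3 URI are
    # hypothetical.
    #
    #   import time
    #
    #   conn.create_batch_prediction(
    #       batch_prediction_id='bp-example',
    #       ml_model_id='ml-example',
    #       batch_prediction_data_source_id='ds-example',
    #       output_uri='s3://example-bucket/batch-output/')
    #   while True:
    #       status = conn.get_batch_prediction('bp-example')['Status']
    #       if status in ('COMPLETED', 'FAILED', 'DELETED'):
    #           break
    #       time.sleep(30)
    #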
    def create_data_source_from_rds(self, data_source_id, rds_data, role_arn,
                                    data_source_name=None,
                                    compute_statistics=None):
        """
        Creates a `DataSource` object from an `Amazon Relational
        Database Service`_ (Amazon RDS). A `DataSource` references
        data that can be used to perform CreateMLModel,
        CreateEvaluation, or CreateBatchPrediction operations.

        `CreateDataSourceFromRDS` is an asynchronous operation. In
        response to `CreateDataSourceFromRDS`, Amazon Machine Learning
        (Amazon ML) immediately returns and sets the `DataSource`
        status to `PENDING`. After the `DataSource` is created and
        ready for use, Amazon ML sets the `Status` parameter to
        `COMPLETED`. `DataSource` in `COMPLETED` or `PENDING` status
        can only be used to perform CreateMLModel, CreateEvaluation,
        or CreateBatchPrediction operations.

        If Amazon ML cannot accept the input source, it sets the
        `Status` parameter to `FAILED` and includes an error message
        in the `Message` attribute of the GetDataSource operation
        response.

        :type data_source_id: string
        :param data_source_id: A user-supplied ID that uniquely identifies
            the `DataSource`. Typically, an Amazon Resource Name (ARN)
            becomes the ID for a `DataSource`.

        :type data_source_name: string
        :param data_source_name: A user-supplied name or description of the
            `DataSource`.

        :type rds_data: dict
        :param rds_data:
            The data specification of an Amazon RDS `DataSource`:

            + DatabaseInformation -

              + `DatabaseName` - Name of the Amazon RDS database.
              + `InstanceIdentifier` - Unique identifier for the Amazon RDS
                database instance.

            + DatabaseCredentials - AWS Identity and Access Management (IAM)
              credentials that are used to connect to the Amazon RDS database.
            + ResourceRole - Role (DataPipelineDefaultResourceRole) assumed by
              an Amazon Elastic Compute Cloud (EC2) instance to carry out the
              copy task from Amazon RDS to Amazon S3. For more information,
              see `Role templates`_ for data pipelines.
            + ServiceRole - Role (DataPipelineDefaultRole) assumed by the AWS
              Data Pipeline service to monitor the progress of the copy task
              from Amazon RDS to Amazon Simple Storage Service (S3). For more
              information, see `Role templates`_ for data pipelines.
            + SecurityInfo - Security information to use to access an Amazon
              RDS instance. You need to set up appropriate ingress rules for
              the security entity IDs provided to allow access to the Amazon
              RDS instance. Specify a [`SubnetId`, `SecurityGroupIds`] pair
              for a VPC-based Amazon RDS instance.
            + SelectSqlQuery - Query that is used to retrieve the observation
              data for the `Datasource`.
            + S3StagingLocation - Amazon S3 location for staging RDS data. The
              data retrieved from Amazon RDS using `SelectSqlQuery` is stored
              in this location.
            + DataSchemaUri - Amazon S3 location of the `DataSchema`.
            + DataSchema - A JSON string representing the schema. This is not
              required if `DataSchemaUri` is specified.
            + DataRearrangement - A JSON string representing the splitting
              requirement of a `Datasource`. Sample -
              `"{\"randomSeed\":\"some-random-seed\",
              \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"`

        :type role_arn: string
        :param role_arn: The role that Amazon ML assumes on behalf of the
            user to create and activate a data pipeline in the user's account
            and copy data (using the `SelectSqlQuery` query) from Amazon RDS
            to Amazon S3.

        :type compute_statistics: boolean
        :param compute_statistics: The compute statistics for a `DataSource`.
            The statistics are generated from the observation data referenced
            by a `DataSource`. Amazon ML uses the statistics internally during
            `MLModel` training. This parameter must be set to `True` if the
            `DataSource` needs to be used for `MLModel` training.

        """
        params = {
            'DataSourceId': data_source_id,
            'RDSData': rds_data,
            'RoleARN': role_arn,
        }
        if data_source_name is not None:
            params['DataSourceName'] = data_source_name
        if compute_statistics is not None:
            params['ComputeStatistics'] = compute_statistics
        return self.make_request(action='CreateDataSourceFromRDS',
                                 body=json.dumps(params))

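    # Usage sketch (illustrative, not part of boto): a hypothetical `rds_data`
    # specification following the bullet list above. Every identifier, role,
    # credential, and S3 path is made up, and the exact key layout should be
    # verified against the AWS RDSDataSpec reference; the IAM roles and
    # ingress rules must already exist in your account.
    #
    #   rds_data = {
    #       'DatabaseInformation': {'DatabaseName': 'exampledb',
    #                               'InstanceIdentifier': 'example-instance'},
    #       'DatabaseCredentials': {'Username': 'example-user',
    #                               'Password': 'example-password'},
    #       'SelectSqlQuery': 'SELECT * FROM observations',
    #       'S3StagingLocation': 's3://example-bucket/staging/',
    #       'DataSchemaUri': 's3://example-bucket/observations.schema',
    #       'ResourceRole': 'DataPipelineDefaultResourceRole',
    #       'ServiceRole': 'DataPipelineDefaultRole',
    #       'SubnetId': 'subnet-12345678',
    #       'SecurityGroupIds': ['sg-12345678'],
    #   }
    #   conn.create_data_source_from_rds(
    #       'ds-rds-example', rds_data,
    #       role_arn='arn:aws:iam::123456789012:role/example-ml-role',
    #       compute_statistics=True)
    #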
    def create_data_source_from_redshift(self, data_source_id, data_spec,
                                         role_arn, data_source_name=None,
                                         compute_statistics=None):
        """
        Creates a `DataSource` from `Amazon Redshift`_. A `DataSource`
        references data that can be used to perform either
        CreateMLModel, CreateEvaluation or CreateBatchPrediction
        operations.

        `CreateDataSourceFromRedshift` is an asynchronous operation.
        In response to `CreateDataSourceFromRedshift`, Amazon Machine
        Learning (Amazon ML) immediately returns and sets the
        `DataSource` status to `PENDING`. After the `DataSource` is
        created and ready for use, Amazon ML sets the `Status`
        parameter to `COMPLETED`. `DataSource` in `COMPLETED` or
        `PENDING` status can only be used to perform CreateMLModel,
        CreateEvaluation, or CreateBatchPrediction operations.

        If Amazon ML cannot accept the input source, it sets the
        `Status` parameter to `FAILED` and includes an error message
        in the `Message` attribute of the GetDataSource operation
        response.

        The observations should exist in the database hosted on an
        Amazon Redshift cluster and should be specified by a
        `SelectSqlQuery`. Amazon ML executes an `Unload`_ command in
        Amazon Redshift to transfer the result set of `SelectSqlQuery`
        to `S3StagingLocation`.

        After the `DataSource` is created, it's ready for use in
        evaluations and batch predictions. If you plan to use the
        `DataSource` to train an `MLModel`, the `DataSource` requires
        another item -- a recipe. A recipe describes the observation
        variables that participate in training an `MLModel`. A recipe
        describes how each input variable will be used in training.
        Will the variable be included or excluded from training? Will
        the variable be manipulated, for example, combined with
        another variable or split apart into word combinations? The
        recipe provides answers to these questions. For more
        information, see the Amazon Machine Learning Developer Guide.

        :type data_source_id: string
        :param data_source_id: A user-supplied ID that uniquely identifies
            the `DataSource`.

        :type data_source_name: string
        :param data_source_name: A user-supplied name or description of the
            `DataSource`.

        :type data_spec: dict
        :param data_spec:
            The data specification of an Amazon Redshift `DataSource`:

            + DatabaseInformation -

              + `DatabaseName` - Name of the Amazon Redshift database.
              + `ClusterIdentifier` - Unique ID for the Amazon Redshift
                cluster.

            + DatabaseCredentials - AWS Identity and Access Management (IAM)
              credentials that are used to connect to the Amazon Redshift
              database.
            + SelectSqlQuery - Query that is used to retrieve the observation
              data for the `Datasource`.
            + S3StagingLocation - Amazon Simple Storage Service (Amazon S3)
              location for staging Amazon Redshift data. The data retrieved
              from Amazon Redshift using `SelectSqlQuery` is stored in this
              location.
            + DataSchemaUri - Amazon S3 location of the `DataSchema`.
            + DataSchema - A JSON string representing the schema. This is not
              required if `DataSchemaUri` is specified.
            + DataRearrangement - A JSON string representing the splitting
              requirement of a `Datasource`. Sample -
              `"{\"randomSeed\":\"some-random-seed\",
              \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"`

        :type role_arn: string
        :param role_arn: A fully specified role Amazon Resource Name (ARN).
            Amazon ML assumes the role on behalf of the user to create the
            following:

            + A security group to allow Amazon ML to execute the
              `SelectSqlQuery` query on an Amazon Redshift cluster
            + An Amazon S3 bucket policy to grant Amazon ML read/write
              permissions on the `S3StagingLocation`

        :type compute_statistics: boolean
        :param compute_statistics: The compute statistics for a `DataSource`.
            The statistics are generated from the observation data referenced
            by a `DataSource`. Amazon ML uses the statistics internally during
            `MLModel` training. This parameter must be set to `True` if the
            `DataSource` needs to be used for `MLModel` training.

        """
        params = {
            'DataSourceId': data_source_id,
            'DataSpec': data_spec,
            'RoleARN': role_arn,
        }
        if data_source_name is not None:
            params['DataSourceName'] = data_source_name
        if compute_statistics is not None:
            params['ComputeStatistics'] = compute_statistics
        return self.make_request(action='CreateDataSourceFromRedshift',
                                 body=json.dumps(params))

    def create_data_source_from_s3(self, data_source_id, data_spec,
                                   data_source_name=None,
                                   compute_statistics=None):
        """
        Creates a `DataSource` object. A `DataSource` references data
        that can be used to perform CreateMLModel, CreateEvaluation,
        or CreateBatchPrediction operations.

        `CreateDataSourceFromS3` is an asynchronous operation. In
        response to `CreateDataSourceFromS3`, Amazon Machine Learning
        (Amazon ML) immediately returns and sets the `DataSource`
        status to `PENDING`. After the `DataSource` is created and
        ready for use, Amazon ML sets the `Status` parameter to
        `COMPLETED`. `DataSource` in `COMPLETED` or `PENDING` status
        can only be used to perform CreateMLModel, CreateEvaluation or
        CreateBatchPrediction operations.

        If Amazon ML cannot accept the input source, it sets the
        `Status` parameter to `FAILED` and includes an error message
        in the `Message` attribute of the GetDataSource operation
        response.

        The observation data used in a `DataSource` should be ready to
        use; that is, it should have a consistent structure, and
        missing data values should be kept to a minimum. The
        observation data must reside in one or more CSV files in an
        Amazon Simple Storage Service (Amazon S3) bucket, along with a
        schema that describes the data items by name and type. The
        same schema must be used for all of the data files referenced
        by the `DataSource`.

        After the `DataSource` has been created, it's ready to use in
        evaluations and batch predictions. If you plan to use the
        `DataSource` to train an `MLModel`, the `DataSource` requires
        another item: a recipe. A recipe describes the observation
        variables that participate in training an `MLModel`. A recipe
        describes how each input variable will be used in training.
        Will the variable be included or excluded from training? Will
        the variable be manipulated, for example, combined with
        another variable, or split apart into word combinations? The
        recipe provides answers to these questions. For more
        information, see the `Amazon Machine Learning Developer
        Guide`_.

        :type data_source_id: string
        :param data_source_id: A user-supplied identifier that uniquely
            identifies the `DataSource`.

        :type data_source_name: string
        :param data_source_name: A user-supplied name or description of the
            `DataSource`.

        :type data_spec: dict
        :param data_spec:
            The data specification of a `DataSource`:

            + DataLocationS3 - Amazon Simple Storage Service (Amazon S3)
              location of the observation data.
            + DataSchemaLocationS3 - Amazon S3 location of the `DataSchema`.
            + DataSchema - A JSON string representing the schema. This is not
              required if `DataSchemaUri` is specified.
            + DataRearrangement - A JSON string representing the splitting
              requirement of a `Datasource`. Sample -
              `"{\"randomSeed\":\"some-random-seed\",
              \"splitting\":{\"percentBegin\":10,\"percentEnd\":60}}"`

        :type compute_statistics: boolean
        :param compute_statistics: The compute statistics for a `DataSource`.
            The statistics are generated from the observation data referenced
            by a `DataSource`. Amazon ML uses the statistics internally during
            `MLModel` training. This parameter must be set to `True` if the
            `DataSource` needs to be used for `MLModel` training.

        """
        params = {
            'DataSourceId': data_source_id,
            'DataSpec': data_spec,
        }
        if data_source_name is not None:
            params['DataSourceName'] = data_source_name
        if compute_statistics is not None:
            params['ComputeStatistics'] = compute_statistics
        return self.make_request(action='CreateDataSourceFromS3',
                                 body=json.dumps(params))

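    # Usage sketch (illustrative, not part of boto): a hypothetical S3
    # `data_spec` following the bullet list above. Bucket names, schema
    # location, and the DataRearrangement string are all made up.
    #
    #   data_spec = {
    #       'DataLocationS3': 's3://example-bucket/banking.csv',
    #       'DataSchemaLocationS3': 's3://example-bucket/banking.csv.schema',
    #       'DataRearrangement':
    #           '{"splitting":{"percentBegin":0,"percentEnd":70}}',
    #   }
    #   conn.create_data_source_from_s3(
    #       'ds-s3-example', data_spec,
    #       data_source_name='Training data (first 70%)',
    #       compute_statistics=True)
    #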
    def create_evaluation(self, evaluation_id, ml_model_id,
                          evaluation_data_source_id, evaluation_name=None):
        """
        Creates a new `Evaluation` of an `MLModel`. An `MLModel` is
        evaluated on a set of observations associated to a
        `DataSource`. Like a `DataSource` for an `MLModel`, the
        `DataSource` for an `Evaluation` contains values for the
        Target Variable. The `Evaluation` compares the predicted
        result for each observation to the actual outcome and provides
        a summary so that you know how effectively the `MLModel`
        functions on the test data. Evaluation generates a relevant
        performance metric such as BinaryAUC, RegressionRMSE or
        MulticlassAvgFScore based on the corresponding `MLModelType`:
        `BINARY`, `REGRESSION` or `MULTICLASS`.

        `CreateEvaluation` is an asynchronous operation. In response
        to `CreateEvaluation`, Amazon Machine Learning (Amazon ML)
        immediately returns and sets the evaluation status to
        `PENDING`. After the `Evaluation` is created and ready for
        use, Amazon ML sets the status to `COMPLETED`.

        You can use the GetEvaluation operation to check progress of
        the evaluation during the creation operation.

        :type evaluation_id: string
        :param evaluation_id: A user-supplied ID that uniquely identifies the
            `Evaluation`.

        :type evaluation_name: string
        :param evaluation_name: A user-supplied name or description of the
            `Evaluation`.

        :type ml_model_id: string
        :param ml_model_id: The ID of the `MLModel` to evaluate.
            The schema used in creating the `MLModel` must match the schema
            of the `DataSource` used in the `Evaluation`.

        :type evaluation_data_source_id: string
        :param evaluation_data_source_id: The ID of the `DataSource` for the
            evaluation. The schema of the `DataSource` must match the schema
            used to create the `MLModel`.

        """
        params = {
            'EvaluationId': evaluation_id,
            'MLModelId': ml_model_id,
            'EvaluationDataSourceId': evaluation_data_source_id,
        }
        if evaluation_name is not None:
            params['EvaluationName'] = evaluation_name
        return self.make_request(action='CreateEvaluation',
                                 body=json.dumps(params))

    def create_ml_model(self, ml_model_id, ml_model_type,
                        training_data_source_id, ml_model_name=None,
                        parameters=None, recipe=None, recipe_uri=None):
        """
        Creates a new `MLModel` using the data files and the recipe as
        information sources.

        An `MLModel` is nearly immutable. Users can only update the
        `MLModelName` and the `ScoreThreshold` in an `MLModel` without
        creating a new `MLModel`.

        `CreateMLModel` is an asynchronous operation. In response to
        `CreateMLModel`, Amazon Machine Learning (Amazon ML)
        immediately returns and sets the `MLModel` status to
        `PENDING`. After the `MLModel` is created and ready for use,
        Amazon ML sets the status to `COMPLETED`.

        You can use the GetMLModel operation to check progress of the
        `MLModel` during the creation operation.

        CreateMLModel requires a `DataSource` with computed
        statistics, which can be created by setting
        `ComputeStatistics` to `True` in CreateDataSourceFromRDS,
        CreateDataSourceFromS3, or CreateDataSourceFromRedshift
        operations.

        :type ml_model_id: string
        :param ml_model_id: A user-supplied ID that uniquely identifies the
            `MLModel`.

        :type ml_model_name: string
        :param ml_model_name: A user-supplied name or description of the
            `MLModel`.

        :type ml_model_type: string
        :param ml_model_type: The category of supervised learning that this
            `MLModel` will address. Choose from the following types:

            + Choose `REGRESSION` if the `MLModel` will be used to predict a
              numeric value.
            + Choose `BINARY` if the `MLModel` result has two possible values.
            + Choose `MULTICLASS` if the `MLModel` result has a limited number
              of values.

            For more information, see the `Amazon Machine Learning Developer
            Guide`_.

        :type parameters: map
        :param parameters:
            A list of the training parameters in the `MLModel`. The list is
            implemented as a map of key/value pairs.

            The following is the current set of training parameters:

            + `sgd.l1RegularizationAmount` - Coefficient regularization L1
              norm. It controls overfitting the data by penalizing large
              coefficients. This tends to drive coefficients to zero,
              resulting in a sparse feature set. If you use this parameter,
              start by specifying a small value such as 1.0E-08. The value is
              a double that ranges from 0 to MAX_DOUBLE. The default is not
              to use L1 normalization. The parameter cannot be used when `L2`
              is specified. Use this parameter sparingly.
            + `sgd.l2RegularizationAmount` - Coefficient regularization L2
              norm. It controls overfitting the data by penalizing large
              coefficients. This tends to drive coefficients to small,
              nonzero values. If you use this parameter, start by specifying
              a small value such as 1.0E-08. The value is a double that
              ranges from 0 to MAX_DOUBLE. The default is not to use L2
              normalization. This cannot be used when `L1` is specified. Use
              this parameter sparingly.
            + `sgd.maxPasses` - Number of times that the training process
              traverses the observations to build the `MLModel`. The value is
              an integer that ranges from 1 to 10000. The default value is 10.
            + `sgd.maxMLModelSizeInBytes` - Maximum allowed size of the model.
              Depending on the input data, the size of the model might affect
              its performance. The value is an integer that ranges from
              100000 to 2147483648. The default value is 33554432.

        :type training_data_source_id: string
        :param training_data_source_id: The `DataSource` that points to the
            training data.

        :type recipe: string
        :param recipe: The data recipe for creating `MLModel`. You must
            specify either the recipe or its URI. If you don't specify a
            recipe or its URI, Amazon ML creates a default.

        :type recipe_uri: string
        :param recipe_uri: The Amazon Simple Storage Service (Amazon S3)
            location and file name that contains the `MLModel` recipe. You
            must specify either the recipe or its URI. If you don't specify a
            recipe or its URI, Amazon ML creates a default.

        """
        params = {
            'MLModelId': ml_model_id,
            'MLModelType': ml_model_type,
            'TrainingDataSourceId': training_data_source_id,
        }
        if ml_model_name is not None:
            params['MLModelName'] = ml_model_name
        if parameters is not None:
            params['Parameters'] = parameters
        if recipe is not None:
            params['Recipe'] = recipe
        if recipe_uri is not None:
            params['RecipeUri'] = recipe_uri
        return self.make_request(action='CreateMLModel',
                                 body=json.dumps(params))

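    # Usage sketch (illustrative, not part of boto): training a binary model
    # with a couple of the sgd.* parameters documented above, then evaluating
    # it on a held-out DataSource. All IDs are hypothetical; parameter values
    # are passed as strings.
    #
    #   conn.create_ml_model(
    #       ml_model_id='ml-example',
    #       ml_model_type='BINARY',
    #       training_data_source_id='ds-s3-example',
    #       parameters={'sgd.maxPasses': '30',
    #                   'sgd.l2RegularizationAmount': '1.0E-06'})
    #   conn.create_evaluation('ev-example', 'ml-example',
    #                          evaluation_data_source_id='ds-holdout-example')
    #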
    def create_realtime_endpoint(self, ml_model_id):
        """
        Creates a real-time endpoint for the `MLModel`. The endpoint
        contains the URI of the `MLModel`; that is, the location to
        send real-time prediction requests for the specified
        `MLModel`.

        :type ml_model_id: string
        :param ml_model_id: The ID assigned to the `MLModel` during creation.

        """
        params = {'MLModelId': ml_model_id, }
        return self.make_request(action='CreateRealtimeEndpoint',
                                 body=json.dumps(params))

    def delete_batch_prediction(self, batch_prediction_id):
        """
        Assigns the DELETED status to a `BatchPrediction`, rendering
        it unusable.

        After using the `DeleteBatchPrediction` operation, you can use
        the GetBatchPrediction operation to verify that the status of
        the `BatchPrediction` changed to DELETED.

        The result of the `DeleteBatchPrediction` operation is
        irreversible.

        :type batch_prediction_id: string
        :param batch_prediction_id: A user-supplied ID that uniquely
            identifies the `BatchPrediction`.

        """
        params = {'BatchPredictionId': batch_prediction_id, }
        return self.make_request(action='DeleteBatchPrediction',
                                 body=json.dumps(params))

    def delete_data_source(self, data_source_id):
        """
        Assigns the DELETED status to a `DataSource`, rendering it
        unusable.

        After using the `DeleteDataSource` operation, you can use the
        GetDataSource operation to verify that the status of the
        `DataSource` changed to DELETED.

        The results of the `DeleteDataSource` operation are
        irreversible.

        :type data_source_id: string
        :param data_source_id: A user-supplied ID that uniquely identifies
            the `DataSource`.

        """
        params = {'DataSourceId': data_source_id, }
        return self.make_request(action='DeleteDataSource',
                                 body=json.dumps(params))

    def delete_evaluation(self, evaluation_id):
        """
        Assigns the `DELETED` status to an `Evaluation`, rendering it
        unusable.

        After invoking the `DeleteEvaluation` operation, you can use
        the GetEvaluation operation to verify that the status of the
        `Evaluation` changed to `DELETED`.

        The results of the `DeleteEvaluation` operation are
        irreversible.

        :type evaluation_id: string
        :param evaluation_id: A user-supplied ID that uniquely identifies the
            `Evaluation` to delete.

        """
        params = {'EvaluationId': evaluation_id, }
        return self.make_request(action='DeleteEvaluation',
                                 body=json.dumps(params))

    def delete_ml_model(self, ml_model_id):
        """
        Assigns the DELETED status to an `MLModel`, rendering it
        unusable.

        After using the `DeleteMLModel` operation, you can use the
        GetMLModel operation to verify that the status of the
        `MLModel` changed to DELETED.

        The result of the `DeleteMLModel` operation is irreversible.

        :type ml_model_id: string
        :param ml_model_id: A user-supplied ID that uniquely identifies the
            `MLModel`.

        """
        params = {'MLModelId': ml_model_id, }
        return self.make_request(action='DeleteMLModel',
                                 body=json.dumps(params))

    def delete_realtime_endpoint(self, ml_model_id):
        """
        Deletes a real-time endpoint of an `MLModel`.

        :type ml_model_id: string
        :param ml_model_id: The ID assigned to the `MLModel` during creation.

        """
        params = {'MLModelId': ml_model_id, }
        return self.make_request(action='DeleteRealtimeEndpoint',
                                 body=json.dumps(params))

    def describe_batch_predictions(self, filter_variable=None, eq=None,
                                   gt=None, lt=None, ge=None, le=None,
                                   ne=None, prefix=None, sort_order=None,
                                   next_token=None, limit=None):
        """
        Returns a list of `BatchPrediction` operations that match the
        search criteria in the request.

        :type filter_variable: string
        :param filter_variable:
            Use one of the following variables to filter a list of
            `BatchPrediction`:

            + `CreatedAt` - Sets the search criteria to the `BatchPrediction`
              creation date.
            + `Status` - Sets the search criteria to the `BatchPrediction`
              status.
            + `Name` - Sets the search criteria to the contents of the
              `BatchPrediction` `Name`.
            + `IAMUser` - Sets the search criteria to the user account that
              invoked the `BatchPrediction` creation.
            + `MLModelId` - Sets the search criteria to the `MLModel` used in
              the `BatchPrediction`.
            + `DataSourceId` - Sets the search criteria to the `DataSource`
              used in the `BatchPrediction`.
            + `DataURI` - Sets the search criteria to the data file(s) used in
              the `BatchPrediction`. The URL can identify either a file or an
              Amazon Simple Storage Service (Amazon S3) bucket or directory.

        :type eq: string
        :param eq: The equal to operator. The `BatchPrediction` results will
            have `FilterVariable` values that exactly match the value
            specified with `EQ`.

        :type gt: string
        :param gt: The greater than operator. The `BatchPrediction` results
            will have `FilterVariable` values that are greater than the value
            specified with `GT`.

        :type lt: string
        :param lt: The less than operator. The `BatchPrediction` results will
            have `FilterVariable` values that are less than the value
            specified with `LT`.

        :type ge: string
        :param ge: The greater than or equal to operator. The
            `BatchPrediction` results will have `FilterVariable` values that
            are greater than or equal to the value specified with `GE`.

        :type le: string
        :param le: The less than or equal to operator. The `BatchPrediction`
            results will have `FilterVariable` values that are less than or
            equal to the value specified with `LE`.

        :type ne: string
        :param ne: The not equal to operator. The `BatchPrediction` results
            will have `FilterVariable` values not equal to the value
            specified with `NE`.

        :type prefix: string
        :param prefix:
            A string that is found at the beginning of a variable, such as
            `Name` or `Id`.

            For example, a `Batch Prediction` operation could have the `Name`
            `2014-09-09-HolidayGiftMailer`. To search for this
            `BatchPrediction`, select `Name` for the `FilterVariable` and any
            of the following strings for the `Prefix`:

            + 2014-09
            + 2014-09-09
            + 2014-09-09-Holiday

        :type sort_order: string
        :param sort_order: A two-value parameter that determines the sequence
            of the resulting list of `MLModel`s.

            + `asc` - Arranges the list in ascending order (A-Z, 0-9).
            + `dsc` - Arranges the list in descending order (Z-A, 9-0).

            Results are sorted by `FilterVariable`.

        :type next_token: string
        :param next_token: An ID of the page in the paginated results.

        :type limit: integer
        :param limit: The number of pages of information to include in the
            result. The range of acceptable values is 1 through 100. The
            default value is 100.

        """
        params = {}
        if filter_variable is not None:
            params['FilterVariable'] = filter_variable
        if eq is not None:
            params['EQ'] = eq
        if gt is not None:
            params['GT'] = gt
        if lt is not None:
            params['LT'] = lt
        if ge is not None:
            params['GE'] = ge
        if le is not None:
            params['LE'] = le
        if ne is not None:
            params['NE'] = ne
        if prefix is not None:
            params['Prefix'] = prefix
        if sort_order is not None:
            params['SortOrder'] = sort_order
        if next_token is not None:
            params['NextToken'] = next_token
        if limit is not None:
            params['Limit'] = limit
        return self.make_request(action='DescribeBatchPredictions',
                                 body=json.dumps(params))

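    # Usage sketch (illustrative, not part of boto): listing completed batch
    # predictions page by page with the filter and pagination parameters
    # described above. The status value and 100-item limit come from the
    # docstring; the response key names ('Results', 'NextToken') are assumed
    # from the Amazon ML DescribeBatchPredictions output.
    #
    #   token = None
    #   while True:
    #       page = conn.describe_batch_predictions(
    #           filter_variable='Status', eq='COMPLETED',
    #           sort_order='dsc', limit=100, next_token=token)
    #       for bp in page.get('Results', []):
    #           print(bp['BatchPredictionId'])
    #       token = page.get('NextToken')
    #       if not token:
    #           break
    #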
    def describe_data_sources(self, filter_variable=None, eq=None, gt=None,
                              lt=None, ge=None, le=None, ne=None,
                              prefix=None, sort_order=None, next_token=None,
                              limit=None):
        """
        Returns a list of `DataSource` that match the search criteria
        in the request.

        :type filter_variable: string
        :param filter_variable:
            Use one of the following variables to filter a list of
            `DataSource`:

            + `CreatedAt` - Sets the search criteria to `DataSource` creation
              dates.
            + `Status` - Sets the search criteria to `DataSource` statuses.
            + `Name` - Sets the search criteria to the contents of
              `DataSource` `Name`.
            + `DataUri` - Sets the search criteria to the URI of data files
              used to create the `DataSource`. The URI can identify either a
              file or an Amazon Simple Storage Service (Amazon S3) bucket or
              directory.
            + `IAMUser` - Sets the search criteria to the user account that
              invoked the `DataSource` creation.

        :type eq: string
        :param eq: The equal to operator. The `DataSource` results will have
            `FilterVariable` values that exactly match the value specified
            with `EQ`.

        :type gt: string
        :param gt: The greater than operator. The `DataSource` results will
            have `FilterVariable` values that are greater than the value
            specified with `GT`.

        :type lt: string
        :param lt: The less than operator. The `DataSource` results will have
            `FilterVariable` values that are less than the value specified
            with `LT`.

        :type ge: string
        :param ge: The greater than or equal to operator. The `DataSource`
            results will have `FilterVariable` values that are greater than
            or equal to the value specified with `GE`.

        :type le: string
        :param le: The less than or equal to operator. The `DataSource`
            results will have `FilterVariable` values that are less than or
            equal to the value specified with `LE`.

        :type ne: string
        :param ne: The not equal to operator. The `DataSource` results will
            have `FilterVariable` values not equal to the value specified
            with `NE`.

        :type prefix: string
        :param prefix:
            A string that is found at the beginning of a variable, such as
            `Name` or `Id`.

            For example, a `DataSource` could have the `Name`
            `2014-09-09-HolidayGiftMailer`. To search for this `DataSource`,
            select `Name` for the `FilterVariable` and any of the following
            strings for the `Prefix`:

            + 2014-09
            + 2014-09-09
            + 2014-09-09-Holiday

        :type sort_order: string
        :param sort_order: A two-value parameter that determines the sequence
            of the resulting list of `DataSource`.

            + `asc` - Arranges the list in ascending order (A-Z, 0-9).
            + `dsc` - Arranges the list in descending order (Z-A, 9-0).

            Results are sorted by `FilterVariable`.

        :type next_token: string
        :param next_token: The ID of the page in the paginated results.

        :type limit: integer
        :param limit: The maximum number of `DataSource` to include in the
            result.

        """
        params = {}
        if filter_variable is not None:
            params['FilterVariable'] = filter_variable
        if eq is not None:
            params['EQ'] = eq
        if gt is not None:
            params['GT'] = gt
        if lt is not None:
            params['LT'] = lt
        if ge is not None:
            params['GE'] = ge
        if le is not None:
            params['LE'] = le
        if ne is not None:
            params['NE'] = ne
        if prefix is not None:
            params['Prefix'] = prefix
        if sort_order is not None:
            params['SortOrder'] = sort_order
        if next_token is not None:
            params['NextToken'] = next_token
        if limit is not None:
            params['Limit'] = limit
        return self.make_request(action='DescribeDataSources',
                                 body=json.dumps(params))

    def describe_evaluations(self, filter_variable=None, eq=None, gt=None,
                             lt=None, ge=None, le=None, ne=None, prefix=None,
                             sort_order=None, next_token=None, limit=None):
        """
        Returns a list of `DescribeEvaluations` that match the search
        criteria in the request.

        :type filter_variable: string
        :param filter_variable:
            Use one of the following variables to filter a list of
            `Evaluation` objects:

            + `CreatedAt` - Sets the search criteria to the `Evaluation`
              creation date.
            + `Status` - Sets the search criteria to the `Evaluation` status.
            + `Name` - Sets the search criteria to the contents of
              `Evaluation` `Name`.
            + `IAMUser` - Sets the search criteria to the user account that
              invoked an `Evaluation`.
            + `MLModelId` - Sets the search criteria to the `MLModel` that was
              evaluated.
            + `DataSourceId` - Sets the search criteria to the `DataSource`
              used in `Evaluation`.
            + `DataUri` - Sets the search criteria to the data file(s) used in
              `Evaluation`. The URL can identify either a file or an Amazon
              Simple Storage Service (Amazon S3) bucket or directory.

        :type eq: string
        :param eq: The equal to operator. The `Evaluation` results will have
            `FilterVariable` values that exactly match the value specified
            with `EQ`.

        :type gt: string
        :param gt: The greater than operator. The `Evaluation` results will
            have `FilterVariable` values that are greater than the value
            specified with `GT`.

        :type lt: string
        :param lt: The less than operator. The `Evaluation` results will have
            `FilterVariable` values that are less than the value specified
            with `LT`.

        :type ge: string
        :param ge: The greater than or equal to operator. The `Evaluation`
            results will have `FilterVariable` values that are greater than
            or equal to the value specified with `GE`.

        :type le: string
        :param le: The less than or equal to operator. The `Evaluation`
            results will have `FilterVariable` values that are less than or
            equal to the value specified with `LE`.

        :type ne: string
        :param ne: The not equal to operator. The `Evaluation` results will
            have `FilterVariable` values not equal to the value specified
            with `NE`.

        :type prefix: string
        :param prefix:
            A string that is found at the beginning of a variable, such as
            `Name` or `Id`.

            For example, an `Evaluation` could have the `Name`
            `2014-09-09-HolidayGiftMailer`. To search for this `Evaluation`,
            select `Name` for the `FilterVariable` and any of the following
            strings for the `Prefix`:

            + 2014-09
            + 2014-09-09
            + 2014-09-09-Holiday

        :type sort_order: string
        :param sort_order: A two-value parameter that determines the sequence
            of the resulting list of `Evaluation`.

            + `asc` - Arranges the list in ascending order (A-Z, 0-9).
            + `dsc` - Arranges the list in descending order (Z-A, 9-0).

            Results are sorted by `FilterVariable`.

        :type next_token: string
        :param next_token: The ID of the page in the paginated results.

        :type limit: integer
        :param limit: The maximum number of `Evaluation` to include in the
            result.

        """
        params = {}
        if filter_variable is not None:
            params['FilterVariable'] = filter_variable
        if eq is not None:
            params['EQ'] = eq
        if gt is not None:
            params['GT'] = gt
        if lt is not None:
            params['LT'] = lt
        if ge is not None:
            params['GE'] = ge
        if le is not None:
            params['LE'] = le
        if ne is not None:
            params['NE'] = ne
        if prefix is not None:
            params['Prefix'] = prefix
        if sort_order is not None:
            params['SortOrder'] = sort_order
        if next_token is not None:
            params['NextToken'] = next_token
        if limit is not None:
            params['Limit'] = limit
        return self.make_request(action='DescribeEvaluations',
                                 body=json.dumps(params))

    def describe_ml_models(self, filter_variable=None, eq=None, gt=None,
                           lt=None, ge=None, le=None, ne=None, prefix=None,
                           sort_order=None, next_token=None, limit=None):
        """
        Returns a list of `MLModel` that match the search criteria in
        the request.

        :type filter_variable: string
        :param filter_variable:
            Use one of the following variables to filter a list of `MLModel`:

            + `CreatedAt` - Sets the search criteria to `MLModel` creation
              date.
            + `Status` - Sets the search criteria to `MLModel` status.
            + `Name` - Sets the search criteria to the contents of `MLModel`
              `Name`.
            + `IAMUser` - Sets the search criteria to the user account that
              invoked the `MLModel` creation.
            + `TrainingDataSourceId` - Sets the search criteria to the
              `DataSource` used to train one or more `MLModel`.
            + `RealtimeEndpointStatus` - Sets the search criteria to the
              `MLModel` real-time endpoint status.
            + `MLModelType` - Sets the search criteria to `MLModel` type:
              binary, regression, or multi-class.
            + `Algorithm` - Sets the search criteria to the algorithm that the
              `MLModel` uses.
            + `TrainingDataURI` - Sets the search criteria to the data file(s)
              used in training an `MLModel`. The URL can identify either a
              file or an Amazon Simple Storage Service (Amazon S3) bucket or
              directory.

        :type eq: string
        :param eq: The equal to operator. The `MLModel` results will have
            `FilterVariable` values that exactly match the value specified
            with `EQ`.

        :type gt: string
        :param gt: The greater than operator. The `MLModel` results will have
            `FilterVariable` values that are greater than the value specified
            with `GT`.

        :type lt: string
        :param lt: The less than operator. The `MLModel` results will have
            `FilterVariable` values that are less than the value specified
            with `LT`.

        :type ge: string
        :param ge: The greater than or equal to operator. The `MLModel`
            results will have `FilterVariable` values that are greater than
            or equal to the value specified with `GE`.

        :type le: string
        :param le: The less than or equal to operator. The `MLModel` results
            will have `FilterVariable` values that are less than or equal to
            the value specified with `LE`.

        :type ne: string
        :param ne: The not equal to operator. The `MLModel` results will have
            `FilterVariable` values not equal to the value specified with
            `NE`.

        :type prefix: string
        :param prefix:
            A string that is found at the beginning of a variable, such as
            `Name` or `Id`.

            For example, an `MLModel` could have the `Name`
            `2014-09-09-HolidayGiftMailer`. To search for this `MLModel`,
            select `Name` for the `FilterVariable` and any of the following
            strings for the `Prefix`:

            + 2014-09
            + 2014-09-09
            + 2014-09-09-Holiday

        :type sort_order: string
        :param sort_order: A two-value parameter that determines the sequence
            of the resulting list of `MLModel`.

            + `asc` - Arranges the list in ascending order (A-Z, 0-9).
            + `dsc` - Arranges the list in descending order (Z-A, 9-0).

            Results are sorted by `FilterVariable`.

        :type next_token: string
        :param next_token: The ID of the page in the paginated results.

        :type limit: integer
        :param limit: The number of pages of information to include in the
            result. The range of acceptable values is 1 through 100. The
            default value is 100.

        """
        params = {}
        if filter_variable is not None:
            params['FilterVariable'] = filter_variable
        if eq is not None:
            params['EQ'] = eq
        if gt is not None:
            params['GT'] = gt
        if lt is not None:
            params['LT'] = lt
        if ge is not None:
            params['GE'] = ge
        if le is not None:
            params['LE'] = le
        if ne is not None:
            params['NE'] = ne
        if prefix is not None:
            params['Prefix'] = prefix
        if sort_order is not None:
            params['SortOrder'] = sort_order
        if next_token is not None:
            params['NextToken'] = next_token
        if limit is not None:
            params['Limit'] = limit
        return self.make_request(action='DescribeMLModels',
                                 body=json.dumps(params))

    def get_batch_prediction(self, batch_prediction_id):
        """
        Returns a `BatchPrediction` that includes detailed metadata,
        status, and data file information for a `Batch Prediction`
        request.

        :type batch_prediction_id: string
        :param batch_prediction_id: An ID assigned to the `BatchPrediction`
            at creation.

        """
        params = {'BatchPredictionId': batch_prediction_id, }
        return self.make_request(action='GetBatchPrediction',
                                 body=json.dumps(params))

    def get_data_source(self, data_source_id, verbose=None):
        """
        Returns a `DataSource` that includes metadata and data file
        information, as well as the current status of the
        `DataSource`.

        `GetDataSource` provides results in normal or verbose format.
        The verbose format adds the schema description and the list of
        files pointed to by the `DataSource` to the normal format.

        :type data_source_id: string
        :param data_source_id: The ID assigned to the `DataSource` at
            creation.

        :type verbose: boolean
        :param verbose: Specifies whether the `GetDataSource` operation
            should return `DataSourceSchema`.
            If true, `DataSourceSchema` is returned.

            If false, `DataSourceSchema` is not returned.

        """
        params = {'DataSourceId': data_source_id, }
        if verbose is not None:
            params['Verbose'] = verbose
        return self.make_request(action='GetDataSource',
                                 body=json.dumps(params))

    def get_evaluation(self, evaluation_id):
        """
        Returns an `Evaluation` that includes metadata as well as the
        current status of the `Evaluation`.

        :type evaluation_id: string
        :param evaluation_id: The ID of the `Evaluation` to retrieve. The
            evaluation of each `MLModel` is recorded and cataloged. The ID
            provides the means to access the information.

        """
        params = {'EvaluationId': evaluation_id, }
        return self.make_request(action='GetEvaluation',
                                 body=json.dumps(params))

    def get_ml_model(self, ml_model_id, verbose=None):
        """
        Returns an `MLModel` that includes detailed metadata, data
        source information, and the current status of the `MLModel`.

        `GetMLModel` provides results in normal or verbose format.

        :type ml_model_id: string
        :param ml_model_id: The ID assigned to the `MLModel` at creation.

        :type verbose: boolean
        :param verbose: Specifies whether the `GetMLModel` operation should
            return `Recipe`.
            If true, `Recipe` is returned.

            If false, `Recipe` is not returned.

        """
        params = {'MLModelId': ml_model_id, }
        if verbose is not None:
            params['Verbose'] = verbose
        return self.make_request(action='GetMLModel',
                                 body=json.dumps(params))

    def predict(self, ml_model_id, record, predict_endpoint):
        """
        Generates a prediction for the observation using the specified
        `MLModel`.

        Not all response parameters will be populated because this is
        dependent on the type of requested model.

        :type ml_model_id: string
        :param ml_model_id: A unique identifier of the `MLModel`.

        :type record: map
        :param record: A map of variable name-value pairs that represent an
            observation.

        :type predict_endpoint: string
        :param predict_endpoint: The endpoint to send the predict request to.

        """
        predict_host = urlsplit(predict_endpoint).hostname
        if predict_host is None:
            predict_host = predict_endpoint

        params = {
            'MLModelId': ml_model_id,
            'Record': record,
            'PredictEndpoint': predict_host,
        }
        return self.make_request(action='Predict',
                                 body=json.dumps(params),
                                 host=predict_host)

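    # Usage sketch (illustrative, not part of boto): real-time prediction
    # against an endpoint created with create_realtime_endpoint(). The
    # endpoint URL is read from the GetMLModel response ('EndpointInfo' /
    # 'EndpointUrl' keys, assumed from the Amazon ML API); the record keys
    # are hypothetical feature names from your own schema.
    #
    #   endpoint = conn.get_ml_model('ml-example')['EndpointInfo']['EndpointUrl']
    #   result = conn.predict(
    #       ml_model_id='ml-example',
    #       record={'age': '42', 'balance': '1100', 'housing': 'yes'},
    #       predict_endpoint=endpoint)
    #   print(result['Prediction'])
    #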
    def update_batch_prediction(self, batch_prediction_id,
                                batch_prediction_name):
        """
        Updates the `BatchPredictionName` of a `BatchPrediction`.

        You can use the GetBatchPrediction operation to view the
        contents of the updated data element.

        :type batch_prediction_id: string
        :param batch_prediction_id: The ID assigned to the `BatchPrediction`
            during creation.

        :type batch_prediction_name: string
        :param batch_prediction_name: A new user-supplied name or description
            of the `BatchPrediction`.

        """
        params = {
            'BatchPredictionId': batch_prediction_id,
            'BatchPredictionName': batch_prediction_name,
        }
        return self.make_request(action='UpdateBatchPrediction',
                                 body=json.dumps(params))

    def update_data_source(self, data_source_id, data_source_name):
        """
        Updates the `DataSourceName` of a `DataSource`.

        You can use the GetDataSource operation to view the contents
        of the updated data element.

        :type data_source_id: string
        :param data_source_id: The ID assigned to the `DataSource` during
            creation.

        :type data_source_name: string
        :param data_source_name: A new user-supplied name or description of
            the `DataSource` that will replace the current description.

        """
        params = {
            'DataSourceId': data_source_id,
            'DataSourceName': data_source_name,
        }
        return self.make_request(action='UpdateDataSource',
                                 body=json.dumps(params))

    def update_evaluation(self, evaluation_id, evaluation_name):
        """
        Updates the `EvaluationName` of an `Evaluation`.

        You can use the GetEvaluation operation to view the contents
        of the updated data element.

        :type evaluation_id: string
        :param evaluation_id: The ID assigned to the `Evaluation` during
            creation.

        :type evaluation_name: string
        :param evaluation_name: A new user-supplied name or description of
            the `Evaluation` that will replace the current content.

        """
        params = {
            'EvaluationId': evaluation_id,
            'EvaluationName': evaluation_name,
        }
        return self.make_request(action='UpdateEvaluation',
                                 body=json.dumps(params))

    def update_ml_model(self, ml_model_id, ml_model_name=None,
                        score_threshold=None):
        """
        Updates the `MLModelName` and the `ScoreThreshold` of an
        `MLModel`.

        You can use the GetMLModel operation to view the contents of
        the updated data element.

        :type ml_model_id: string
        :param ml_model_id: The ID assigned to the `MLModel` during creation.

        :type ml_model_name: string
        :param ml_model_name: A user-supplied name or description of the
            `MLModel`.

        :type score_threshold: float
        :param score_threshold: The `ScoreThreshold` used in binary
            classification `MLModel` that marks the boundary between a
            positive prediction and a negative prediction.
            Output values greater than or equal to the `ScoreThreshold`
            receive a positive result from the `MLModel`, such as `True`.
            Output values less than the `ScoreThreshold` receive a negative
            response from the `MLModel`, such as `False`.

        """
        params = {'MLModelId': ml_model_id, }
        if ml_model_name is not None:
            params['MLModelName'] = ml_model_name
        if score_threshold is not None:
            params['ScoreThreshold'] = score_threshold
        return self.make_request(action='UpdateMLModel',
                                 body=json.dumps(params))

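    # Usage sketch (illustrative, not part of boto): adjusting the binary
    # classification cut-off documented above. Scores at or above 0.75 would
    # then be reported as positive; the ID and threshold are hypothetical.
    #
    #   conn.update_ml_model('ml-example', score_threshold=0.75)
    #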
    def make_request(self, action, body, host=None):
        headers = {
            'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
            'Host': self.region.endpoint,
            'Content-Type': 'application/x-amz-json-1.1',
            'Content-Length': str(len(body)),
        }
        http_request_kwargs = {
            'method': 'POST', 'path': '/', 'auth_path': '/', 'params': {},
            'headers': headers, 'data': body
        }
        if host is not None:
            headers['Host'] = host
            http_request_kwargs['host'] = host
        http_request = self.build_base_http_request(**http_request_kwargs)
        response = self._mexe(http_request, sender=None,
                              override_num_retries=10)
        response_body = response.read().decode('utf-8')
        boto.log.debug(response_body)
        if response.status == 200:
            if response_body:
                return json.loads(response_body)
        else:
            json_body = json.loads(response_body)
            fault_name = json_body.get('__type', None)
            exception_class = self._faults.get(fault_name, self.ResponseError)
            raise exception_class(response.status, response.reason,
                                  body=json_body)
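
# A minimal end-to-end sketch (not part of boto): service errors raised by
# make_request() are mapped via MachineLearningConnection._faults onto the
# classes in boto.machinelearning.exceptions, falling back to
# JSONResponseError. Assumes AWS credentials are configured for boto; the
# model ID below is hypothetical, so the call is expected to fail and land
# in the except branch.
if __name__ == '__main__':
    conn = MachineLearningConnection()
    try:
        model = conn.get_ml_model('ml-does-not-exist')
        print(model.get('Status'))
    except JSONResponseError as err:
        # err.body holds the parsed JSON error document from the service.
        print('Amazon ML error %s: %s' % (err.status, err.body))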